Dataset columns (type and observed length / value range):

Column                        Type    Values
entry_point                   string  lengths 1 to 65
original_triton_python_code   string  lengths 208 to 619k
optimised_triton_code         string  lengths 1.15k to 275k
repo_name                     string  lengths 7 to 115
module_name                   string  lengths 1 to 65
synthetic                     bool    1 class
uuid                          int64   0 to 18.5k
licenses                      list    lengths 1 to 6
stars                         int64   0 to 19.8k
sha                           string  lengths 40 to 40
repo_link                     string  lengths 72 to 180
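The rows below follow this schema. For orientation, here is a minimal, hypothetical sketch of iterating over such rows if they were exported as JSON Lines; the file name and the export format are assumptions, not part of the dataset:

import json

# Hypothetical export: one JSON object per line, keyed by the columns above.
with open("triton_pairs.jsonl") as f:   # file name is an assumption
    for line in f:
        row = json.loads(line)
        # Both code columns are plain strings containing complete Python modules.
        print(row["entry_point"], row["repo_name"], row["stars"])
        assert isinstance(row["original_triton_python_code"], str)
        assert isinstance(row["optimised_triton_code"], str)
        break  # inspect only the first record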
entry_point: ConvBlock

original_triton_python_code:

import torch
import torch.nn as nn
from math import *
import torch.nn.functional as F


class ConvBlock(nn.Module):

    def __init__(self):
        super(ConvBlock, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        return x


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 18816
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 6
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4704
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14
    x3 = xindex // 14
    x2 = xindex // 1176
    x4 = xindex % 1176
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 100 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
    assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
    assert_size_stride(primals_5, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
            18816, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
            buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
            6400, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
            buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1)
    return (reinterpret_tensor(buf7, (4, 400), (400, 1), 0), primals_1,
        primals_3, primals_4, buf1, buf2, buf3, buf5, buf6)


class ConvBlockNew(nn.Module):

    def __init__(self):
        super(ConvBlockNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
repo_name: a8252525/NIID-Bench
module_name: ConvBlock
synthetic: false
uuid: 3,007
licenses: ["MIT"]
stars: 0
sha: 33df8d3a7b941884eec3c7bd52adb8a9476eb282
repo_link: https://github.com/a8252525/NIID-Bench/tree/33df8d3a7b941884eec3c7bd52adb8a9476eb282
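A natural way to use a record like the one above is to check that the Inductor-generated module reproduces the original. The sketch below is an assumption-laden example: it presumes both class definitions from the record have already been executed in the current process and that a CUDA device is available, since the generated code targets CUDA.

import torch

torch.manual_seed(0)
ref = ConvBlock().cuda().eval()         # original module from the record
opt = ConvBlockNew().cuda().eval()      # Inductor-generated replacement
opt.load_state_dict(ref.state_dict())   # copy weights; the pool has no parameters

x = torch.rand([4, 3, 32, 32], device="cuda")  # matches get_inputs()
with torch.no_grad():
    out_ref = ref(x)
    out_opt = opt(x)

# Both outputs should be (4, 400) after the final view(-1, 16 * 5 * 5).
print(out_ref.shape, torch.allclose(out_ref, out_opt, atol=1e-5))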
entry_point: Unet

original_triton_python_code:
import torch import torch.nn as nn class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, dropout=False, norm= 'batch', residual=True, activation='leakyrelu', transpose=False): super(ConvBlock, self).__init__() self.dropout = dropout self.residual = residual self.activation = activation self.transpose = transpose if self.dropout: self.dropout1 = nn.Dropout2d(p=0.05) self.dropout2 = nn.Dropout2d(p=0.05) self.norm1 = None self.norm2 = None if norm == 'batch': self.norm1 = nn.BatchNorm2d(out_channels) self.norm2 = nn.BatchNorm2d(out_channels) elif norm == 'instance': self.norm1 = nn.InstanceNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) elif norm == 'mixed': self.norm1 = nn.BatchNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) if self.transpose: self.conv1 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, padding=1) self.conv2 = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, padding=1) else: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size= 3, padding=1) if self.activation == 'relu': self.actfun1 = nn.ReLU() self.actfun2 = nn.ReLU() elif self.activation == 'leakyrelu': self.actfun1 = nn.LeakyReLU() self.actfun2 = nn.LeakyReLU() elif self.activation == 'elu': self.actfun1 = nn.ELU() self.actfun2 = nn.ELU() elif self.activation == 'selu': self.actfun1 = nn.SELU() self.actfun2 = nn.SELU() def forward(self, x): ox = x x = self.conv1(x) if self.dropout: x = self.dropout1(x) if self.norm1: x = self.norm1(x) x = self.actfun1(x) x = self.conv2(x) if self.dropout: x = self.dropout2(x) if self.norm2: x = self.norm2(x) if self.residual: x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox. 
shape[1], x.shape[1]), :, :] x = self.actfun2(x) return x class Unet(nn.Module): def __init__(self, n_channel_in=1, n_channel_out=1, residual=False, down='conv', up='tconv', activation='selu'): super(Unet, self).__init__() self.residual = residual if down == 'maxpool': self.down1 = nn.MaxPool2d(kernel_size=2) self.down2 = nn.MaxPool2d(kernel_size=2) self.down3 = nn.MaxPool2d(kernel_size=2) self.down4 = nn.MaxPool2d(kernel_size=2) elif down == 'avgpool': self.down1 = nn.AvgPool2d(kernel_size=2) self.down2 = nn.AvgPool2d(kernel_size=2) self.down3 = nn.AvgPool2d(kernel_size=2) self.down4 = nn.AvgPool2d(kernel_size=2) elif down == 'conv': self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32) self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64) self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2, groups=128) self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2, groups=256) self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25 self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25 self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25 self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25 self.down1.bias.data = 0.01 * self.down1.bias.data + 0 self.down2.bias.data = 0.01 * self.down2.bias.data + 0 self.down3.bias.data = 0.01 * self.down3.bias.data + 0 self.down4.bias.data = 0.01 * self.down4.bias.data + 0 if up == 'bilinear' or up == 'nearest': self.up1 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up2 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up3 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up4 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) elif up == 'tconv': self.up1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, groups=256) self.up2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, groups=128) self.up3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2, groups=64) self.up4 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2, groups=32) self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25 self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25 self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25 self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25 self.up1.bias.data = 0.01 * self.up1.bias.data + 0 self.up2.bias.data = 0.01 * self.up2.bias.data + 0 self.up3.bias.data = 0.01 * self.up3.bias.data + 0 self.up4.bias.data = 0.01 * self.up4.bias.data + 0 self.conv1 = ConvBlock(n_channel_in, 32, residual, activation) self.conv2 = ConvBlock(32, 64, residual, activation) self.conv3 = ConvBlock(64, 128, residual, activation) self.conv4 = ConvBlock(128, 256, residual, activation) self.conv5 = ConvBlock(256, 256, residual, activation) self.conv6 = ConvBlock(2 * 256, 128, residual, activation) self.conv7 = ConvBlock(2 * 128, 64, residual, activation) self.conv8 = ConvBlock(2 * 64, 32, residual, activation) self.conv9 = ConvBlock(2 * 32, n_channel_out, residual, activation) if self.residual: self.convres = ConvBlock(n_channel_in, n_channel_out, residual, activation) def forward(self, x): c0 = x c1 = self.conv1(x) x = self.down1(c1) c2 = self.conv2(x) x = self.down2(c2) c3 = self.conv3(x) x = self.down3(c3) c4 = self.conv4(x) x = self.down4(c4) x = self.conv5(x) x = self.up1(x) x = torch.cat([x, c4], 1) x = self.conv6(x) x = self.up2(x) x = torch.cat([x, c3], 1) x = self.conv7(x) x = self.up3(x) x = torch.cat([x, c2], 1) x = self.conv8(x) x = self.up4(x) x = torch.cat([x, c1], 1) x = self.conv9(x) if 
self.residual: x = torch.add(x, self.convres(c0)) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 32 x3 = xindex x0 = xindex % 4096 x2 = xindex // 131072 tmp21 = tl.load(in_ptr0 + x3, None) tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp3, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp2, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + x3, tmp27, None) tl.store(out_ptr1 + x3, tmp30, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) 
tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 64 x3 = xindex x2 = xindex // 65536 x4 = xindex % 65536 tmp21 = tl.load(in_ptr0 + x3, None) tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 32, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x4 + 32768 * x2), tmp3, other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x4 + 32768 * x2), tmp2, other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + x3, tmp27, None) tl.store(out_ptr1 + x3, tmp30, None) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 128 x3 = xindex x2 = xindex // 32768 x4 = xindex % 32768 tmp21 = tl.load(in_ptr0 + x3, None) tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 64, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x4 + 16384 * x2), tmp3, other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = 
tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x4 + 16384 * x2), tmp2, other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + x3, tmp27, None) tl.store(out_ptr1 + x3, tmp30, None) @triton.jit def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 256 x3 = xindex x2 = xindex // 16384 x4 = xindex % 16384 tmp21 = tl.load(in_ptr0 + x3, None) tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 128, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x4 + 8192 * x2), tmp3, other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x4 + 8192 * x2), tmp2, other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + x3, tmp27, None) tl.store(out_ptr1 + x3, tmp30, None) @triton.jit def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: 
tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x3, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 512 x0 = xindex % 64 x2 = xindex // 32768 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 x2 = xindex // 8192 x4 = xindex % 8192 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 32768 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 256 x0 = xindex % 256 x2 = xindex // 65536 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_18(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 x2 = xindex // 16384 x4 = xindex % 16384 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 65536 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 128 x0 = xindex % 1024 x2 = xindex // 131072 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_21(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_22(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 x2 = xindex // 32768 x4 = xindex % 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 
131072 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 64 x0 = xindex % 4096 x2 = xindex // 262144 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_24(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.01 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tl.store(out_ptr0 + x0, tmp5, None) tl.store(out_ptr1 + x0, tmp8, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_25(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 x1 = xindex // 4096 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (x0 + 262144 * x1), None) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 0.01 tmp9 = tmp5 * tmp8 tmp10 = tl.where(tmp7, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp7, None) tl.store(out_ptr1 + x2, tmp10, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53 ) = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, 
(64, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (256,), (1,)) assert_size_stride(primals_22, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_23, (256,), (1,)) assert_size_stride(primals_24, (256, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_25, (256,), (1,)) assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_29, (256,), (1,)) assert_size_stride(primals_30, (256, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (128, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (64, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (64,), (1,)) assert_size_stride(primals_40, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_41, (64,), (1,)) assert_size_stride(primals_42, (64, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_43, (64,), (1,)) assert_size_stride(primals_44, (32, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_45, (32,), (1,)) assert_size_stride(primals_46, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_47, (32,), (1,)) assert_size_stride(primals_48, (32, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_49, (32,), (1,)) assert_size_stride(primals_50, (1, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_51, (1,), (1,)) assert_size_stride(primals_52, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_53, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0, primals_3, buf1, buf2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf5 = buf0 del buf0 triton_poi_fused_add_convolution_leaky_relu_1[grid(524288)](buf3, primals_5, primals_1, buf4, buf5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=32, bias=None) assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 
1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_2[grid(131072)](buf7, primals_7, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_3[grid(262144)](buf8, primals_9, buf9, buf10, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf13 = buf8 del buf8 triton_poi_fused_add_convolution_leaky_relu_4[grid(262144)](buf11, primals_11, buf7, buf12, buf13, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_5[grid(65536)](buf15, primals_13, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 16, 1)) buf17 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_6[grid(131072)](buf16, primals_15, buf17, buf18, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf19 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf20 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf21 = buf16 del buf16 triton_poi_fused_add_convolution_leaky_relu_7[grid(131072)](buf19, primals_17, buf15, buf20, buf21, 131072, XBLOCK=512, num_warps= 8, num_stages=1) del primals_17 buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=128, bias=None) assert_size_stride(buf22, (4, 128, 8, 8), (8192, 64, 8, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_8[grid(32768)](buf23, primals_19, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1)) buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_convolution_leaky_relu_9[grid(65536)](buf24, primals_21, buf25, buf26, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), 
padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1)) buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) buf29 = buf24 del buf24 triton_poi_fused_add_convolution_leaky_relu_10[grid(65536)](buf27, primals_23, buf23, buf28, buf29, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf27 del primals_23 buf30 = extern_kernels.convolution(buf29, primals_24, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=256, bias=None) assert_size_stride(buf30, (4, 256, 4, 4), (4096, 16, 4, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_11[grid(16384)](buf31, primals_25, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 4, 4), (4096, 16, 4, 1)) buf33 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool ) buf34 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) triton_poi_fused_convolution_leaky_relu_12[grid(16384)](buf32, primals_27, buf33, buf34, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_27 buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 256, 4, 4), (4096, 16, 4, 1)) buf36 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool ) buf37 = buf32 del buf32 triton_poi_fused_add_convolution_leaky_relu_13[grid(16384)](buf35, primals_29, buf31, buf36, buf37, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_29 buf38 = extern_kernels.convolution(buf37, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=256, bias=None) assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1)) buf39 = reinterpret_tensor(buf19, (4, 512, 8, 8), (32768, 64, 8, 1), 0) del buf19 triton_poi_fused_cat_14[grid(131072)](buf38, primals_31, buf29, buf39, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_31 buf40 = extern_kernels.convolution(buf39, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 128, 8, 8), (8192, 64, 8, 1)) buf41 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool ) buf42 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. 
float32) triton_poi_fused_convolution_leaky_relu_15[grid(32768)](buf40, primals_33, buf41, buf42, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_33 buf43 = extern_kernels.convolution(buf42, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1)) buf44 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool ) buf45 = buf40 del buf40 triton_poi_fused_add_convolution_leaky_relu_16[grid(32768)](buf43, primals_35, buf39, buf44, buf45, 32768, XBLOCK=128, num_warps=4, num_stages=1) del buf43 del primals_35 buf46 = extern_kernels.convolution(buf45, primals_36, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=128, bias=None) assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1)) buf47 = reinterpret_tensor(buf11, (4, 256, 16, 16), (65536, 256, 16, 1), 0) del buf11 triton_poi_fused_cat_17[grid(262144)](buf46, primals_37, buf21, buf47, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_37 buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 64, 16, 16), (16384, 256, 16, 1)) buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) buf50 = reinterpret_tensor(buf38, (4, 64, 16, 16), (16384, 256, 16, 1), 0) del buf38 triton_poi_fused_convolution_leaky_relu_18[grid(65536)](buf48, primals_39, buf49, buf50, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_39 buf51 = extern_kernels.convolution(buf50, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 64, 16, 16), (16384, 256, 16, 1)) buf52 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) buf53 = buf48 del buf48 triton_poi_fused_add_convolution_leaky_relu_19[grid(65536)](buf51, primals_41, buf47, buf52, buf53, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf51 del primals_41 buf54 = extern_kernels.convolution(buf53, primals_42, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf55 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0) del buf3 triton_poi_fused_cat_20[grid(524288)](buf54, primals_43, buf13, buf55, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf54 del primals_43 buf56 = extern_kernels.convolution(buf55, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf57 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool) buf58 = reinterpret_tensor(buf46, (4, 32, 32, 32), (32768, 1024, 32, 1), 0) del buf46 triton_poi_fused_convolution_leaky_relu_21[grid(131072)](buf56, primals_45, buf57, buf58, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_45 buf59 = extern_kernels.convolution(buf58, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf60 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool) buf61 = buf56 del buf56 triton_poi_fused_add_convolution_leaky_relu_22[grid(131072)](buf59, primals_47, 
buf55, buf60, buf61, 131072, XBLOCK=1024, num_warps =4, num_stages=1) del buf59 del primals_47 buf62 = extern_kernels.convolution(buf61, primals_48, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=32, bias=None) assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_poi_fused_cat_23[grid(1048576)](buf62, primals_49, buf5, buf63, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del buf62 del primals_49 buf64 = extern_kernels.convolution(buf63, primals_50, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf65 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) buf66 = reinterpret_tensor(buf35, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf35 triton_poi_fused_convolution_leaky_relu_24[grid(16384)](buf64, primals_51, buf65, buf66, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_51 buf67 = extern_kernels.convolution(buf66, primals_52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf68 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) buf69 = buf64 del buf64 triton_poi_fused_add_convolution_leaky_relu_25[grid(16384)](buf67, primals_53, buf63, buf68, buf69, 16384, XBLOCK=128, num_warps=4, num_stages=1) del buf67 del primals_53 return (buf69, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, buf1, buf2, buf4, buf5, buf7, buf9, buf10, buf12, buf13, buf15, buf17, buf18, buf20, buf21, buf23, buf25, buf26, buf28, buf29, buf31, buf33, buf34, buf36, buf37, buf39, buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53, buf55, buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68) class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, dropout=False, norm= 'batch', residual=True, activation='leakyrelu', transpose=False): super(ConvBlock, self).__init__() self.dropout = dropout self.residual = residual self.activation = activation self.transpose = transpose if self.dropout: self.dropout1 = nn.Dropout2d(p=0.05) self.dropout2 = nn.Dropout2d(p=0.05) self.norm1 = None self.norm2 = None if norm == 'batch': self.norm1 = nn.BatchNorm2d(out_channels) self.norm2 = nn.BatchNorm2d(out_channels) elif norm == 'instance': self.norm1 = nn.InstanceNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) elif norm == 'mixed': self.norm1 = nn.BatchNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) if self.transpose: self.conv1 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, padding=1) self.conv2 = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, padding=1) else: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size= 3, padding=1) if self.activation == 'relu': self.actfun1 = nn.ReLU() self.actfun2 = nn.ReLU() elif self.activation == 'leakyrelu': self.actfun1 = nn.LeakyReLU() self.actfun2 = nn.LeakyReLU() elif 
self.activation == 'elu': self.actfun1 = nn.ELU() self.actfun2 = nn.ELU() elif self.activation == 'selu': self.actfun1 = nn.SELU() self.actfun2 = nn.SELU() def forward(self, x): ox = x x = self.conv1(x) if self.dropout: x = self.dropout1(x) if self.norm1: x = self.norm1(x) x = self.actfun1(x) x = self.conv2(x) if self.dropout: x = self.dropout2(x) if self.norm2: x = self.norm2(x) if self.residual: x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox. shape[1], x.shape[1]), :, :] x = self.actfun2(x) return x class UnetNew(nn.Module): def __init__(self, n_channel_in=1, n_channel_out=1, residual=False, down='conv', up='tconv', activation='selu'): super(UnetNew, self).__init__() self.residual = residual if down == 'maxpool': self.down1 = nn.MaxPool2d(kernel_size=2) self.down2 = nn.MaxPool2d(kernel_size=2) self.down3 = nn.MaxPool2d(kernel_size=2) self.down4 = nn.MaxPool2d(kernel_size=2) elif down == 'avgpool': self.down1 = nn.AvgPool2d(kernel_size=2) self.down2 = nn.AvgPool2d(kernel_size=2) self.down3 = nn.AvgPool2d(kernel_size=2) self.down4 = nn.AvgPool2d(kernel_size=2) elif down == 'conv': self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32) self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64) self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2, groups=128) self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2, groups=256) self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25 self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25 self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25 self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25 self.down1.bias.data = 0.01 * self.down1.bias.data + 0 self.down2.bias.data = 0.01 * self.down2.bias.data + 0 self.down3.bias.data = 0.01 * self.down3.bias.data + 0 self.down4.bias.data = 0.01 * self.down4.bias.data + 0 if up == 'bilinear' or up == 'nearest': self.up1 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up2 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up3 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up4 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) elif up == 'tconv': self.up1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, groups=256) self.up2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, groups=128) self.up3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2, groups=64) self.up4 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2, groups=32) self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25 self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25 self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25 self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25 self.up1.bias.data = 0.01 * self.up1.bias.data + 0 self.up2.bias.data = 0.01 * self.up2.bias.data + 0 self.up3.bias.data = 0.01 * self.up3.bias.data + 0 self.up4.bias.data = 0.01 * self.up4.bias.data + 0 self.conv1 = ConvBlock(n_channel_in, 32, residual, activation) self.conv2 = ConvBlock(32, 64, residual, activation) self.conv3 = ConvBlock(64, 128, residual, activation) self.conv4 = ConvBlock(128, 256, residual, activation) self.conv5 = ConvBlock(256, 256, residual, activation) self.conv6 = ConvBlock(2 * 256, 128, residual, activation) self.conv7 = ConvBlock(2 * 128, 64, residual, activation) self.conv8 = ConvBlock(2 * 64, 32, residual, activation) self.conv9 = ConvBlock(2 * 32, n_channel_out, residual, activation) if self.residual: self.convres = ConvBlock(n_channel_in, n_channel_out, 
residual, activation) def forward(self, input_0): primals_6 = self.down1.weight primals_3 = self.down1.bias primals_12 = self.down2.weight primals_9 = self.down2.bias primals_18 = self.down3.weight primals_15 = self.down3.bias primals_24 = self.down4.weight primals_21 = self.down4.bias primals_30 = self.up1.weight primals_23 = self.up1.bias primals_36 = self.up2.weight primals_17 = self.up2.bias primals_42 = self.up3.weight primals_11 = self.up3.bias primals_48 = self.up4.weight primals_5 = self.up4.bias primals_2 = self.conv1.conv1.weight primals_7 = self.conv1.conv1.bias primals_4 = self.conv1.conv2.weight primals_45 = self.conv1.conv2.bias primals_8 = self.conv2.conv1.weight primals_13 = self.conv2.conv1.bias primals_10 = self.conv2.conv2.weight primals_39 = self.conv2.conv2.bias primals_14 = self.conv3.conv1.weight primals_19 = self.conv3.conv1.bias primals_16 = self.conv3.conv2.weight primals_33 = self.conv3.conv2.bias primals_20 = self.conv4.conv1.weight primals_25 = self.conv4.conv1.bias primals_22 = self.conv4.conv2.weight primals_27 = self.conv4.conv2.bias primals_26 = self.conv5.conv1.weight primals_29 = self.conv5.conv1.bias primals_28 = self.conv5.conv2.weight primals_31 = self.conv5.conv2.bias primals_32 = self.conv6.conv1.weight primals_35 = self.conv6.conv1.bias primals_34 = self.conv6.conv2.weight primals_37 = self.conv6.conv2.bias primals_38 = self.conv7.conv1.weight primals_41 = self.conv7.conv1.bias primals_40 = self.conv7.conv2.weight primals_43 = self.conv7.conv2.bias primals_44 = self.conv8.conv1.weight primals_47 = self.conv8.conv1.bias primals_46 = self.conv8.conv2.weight primals_49 = self.conv8.conv2.bias primals_50 = self.conv9.conv1.weight primals_51 = self.conv9.conv1.bias primals_52 = self.conv9.conv2.weight primals_53 = self.conv9.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53]) return output[0]
XinweiYu/noise2self
Unet
false
3,008
[ "MIT" ]
0
04e0379a67e1cb0c807abd3f8d4fd1666db5a793
https://github.com/XinweiYu/noise2self/tree/04e0379a67e1cb0c807abd3f8d4fd1666db5a793
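One remark on the down1..down4 / up1..up4 re-initialization in the Unet entry above (weight = 0.01 * weight + 0.25, bias = 0.01 * bias): for a 2x2 stride-2 depthwise convolution this places every tap near 0.25, so at initialization the layer behaves approximately like 2x2 average pooling. A quick hypothetical check (the 32-channel, 64x64 shapes are assumptions taken from the first stage):

import torch
import torch.nn as nn

# With taps ~0.25 and bias ~0, a 2x2 stride-2 depthwise conv starts out
# close to 2x2 average pooling; the deviation comes only from the
# 0.01-scaled random init added on top.
down = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32)
down.weight.data = 0.01 * down.weight.data + 0.25
down.bias.data = 0.01 * down.bias.data + 0

x = torch.rand(4, 32, 64, 64)
delta = (down(x) - nn.functional.avg_pool2d(x, 2)).abs().max()
print(float(delta))  # small: each tap deviates from 0.25 by only ~0.01 * init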
ResNetV2
import torch import torch.nn as nn from collections import OrderedDict import torch.nn.functional as F def conv1x1(cin, cout, stride=1, bias=False): return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias) def conv3x3(cin, cout, stride=1, groups=1, bias=False): return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups) def np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class StdConv2d(nn.Conv2d): def forward(self, x): w = self.weight v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) w = (w - m) / torch.sqrt(v + 1e-05) return F.conv2d(x, w, self.bias, self.stride, self.padding, self. dilation, self.groups) class PreActBottleneck(nn.Module): """Pre-activation (v2) bottleneck block. """ def __init__(self, cin, cout=None, cmid=None, stride=1): super().__init__() cout = cout or cin cmid = cmid or cout // 4 self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06) self.conv1 = conv1x1(cin, cmid, bias=False) self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06) self.conv2 = conv3x3(cmid, cmid, stride, bias=False) self.gn3 = nn.GroupNorm(32, cout, eps=1e-06) self.conv3 = conv1x1(cmid, cout, bias=False) self.relu = nn.ReLU(inplace=True) if stride != 1 or cin != cout: self.downsample = conv1x1(cin, cout, stride, bias=False) self.gn_proj = nn.GroupNorm(cout, cout) def forward(self, x): residual = x if hasattr(self, 'downsample'): residual = self.downsample(x) residual = self.gn_proj(residual) y = self.relu(self.gn1(self.conv1(x))) y = self.relu(self.gn2(self.conv2(y))) y = self.gn3(self.conv3(y)) y = self.relu(residual + y) return y def load_from(self, weights, n_block, n_unit): conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel') ], conv=True) conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel') ], conv=True) conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel') ], conv=True) gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')]) gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')]) gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')]) gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')]) gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')]) gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')]) self.conv1.weight.copy_(conv1_weight) self.conv2.weight.copy_(conv2_weight) self.conv3.weight.copy_(conv3_weight) self.gn1.weight.copy_(gn1_weight.view(-1)) self.gn1.bias.copy_(gn1_bias.view(-1)) self.gn2.weight.copy_(gn2_weight.view(-1)) self.gn2.bias.copy_(gn2_bias.view(-1)) self.gn3.weight.copy_(gn3_weight.view(-1)) self.gn3.bias.copy_(gn3_bias.view(-1)) if hasattr(self, 'downsample'): proj_conv_weight = np2th(weights[pjoin(n_block, n_unit, 'conv_proj/kernel')], conv=True) proj_gn_weight = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/scale')]) proj_gn_bias = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/bias')]) self.downsample.weight.copy_(proj_conv_weight) self.gn_proj.weight.copy_(proj_gn_weight.view(-1)) self.gn_proj.bias.copy_(proj_gn_bias.view(-1)) class ResNetV2(nn.Module): """Implementation of Pre-activation (v2) ResNet mode.""" def __init__(self, block_units, width_factor): super().__init__() width = int(64 * width_factor) self.width = width self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn. 
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True )), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))])) self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential( OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width * 4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 4, cout=width * 4, cmid=width)) for i in range(2, block_units[0 ] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1', PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2, stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8, cout=width * 8, cmid=width * 2)) for i in range(2, block_units[ 1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1', PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4, stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16, cout=width * 16, cmid=width * 4)) for i in range(2, block_units [2] + 1)])))])) def forward(self, x): x = self.root(x) x = self.body(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'block_units': [4, 4, 4], 'width_factor': 4}]
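Two notes on the ResNetV2 source above. First, load_from calls pjoin, which is not imported in this snippet; upstream it is usually brought in as "from os.path import join as pjoin". Second, StdConv2d.forward standardizes each filter to zero mean and unit variance before convolving, which is the same computation the fused *_add_div_sqrt_sub_var_mean_* Triton kernels below perform on the weights (e.g. triton_per_fused_add_div_sqrt_sub_var_mean_5 reduces over the 147 = 3*7*7 taps of the root conv). A minimal eager-mode sketch of that normalization, useful as a reference when checking a kernel's output (the tolerances are assumptions):

import torch

def standardize_weight(w: torch.Tensor, eps: float = 1e-05) -> torch.Tensor:
    # Per-output-filter mean/variance over (in_channels, kH, kW),
    # matching StdConv2d.forward above (unbiased=False, eps=1e-05).
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (w - m) / torch.sqrt(v + eps)

w = torch.randn(256, 3, 7, 7)   # same shape as the root conv weight
ws = standardize_weight(w)
# Each output filter should now have ~zero mean and ~unit variance.
assert torch.allclose(ws.mean(dim=[1, 2, 3]), torch.zeros(256), atol=1e-5)
assert torch.allclose(ws.std(dim=[1, 2, 3], unbiased=False), torch.ones(256), atol=1e-3)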
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from collections import OrderedDict import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 768 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 rnumel = 147 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 147, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 147.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = rindex // 8 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 262144 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = xindex // 262144 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 8192.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = xindex // 256 % 15 x2 = xindex // 3840 % 15 x3 = xindex // 57600 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp5 = tl.load(in_ptr0 + (8192 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp7 = tl.load(in_ptr0 + (8448 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp9 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp11 = tl.load(in_ptr0 + (16384 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp13 = tl.load(in_ptr0 + (16640 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp15 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, 
tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x4, tmp16, xmask) tl.store(out_ptr1 + x4, tmp41, xmask) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 225 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex x0 = xindex % 1024 x1 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 1024 * r2 + 230400 * x1), rmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 225, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 225.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x3, tmp21, None) tl.store(out_ptr0 + x3, tmp10, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1, out_ptr2, 
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 1800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = rindex // 8 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 57600 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 1800.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 256 x2 = xindex // 57600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1800.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 256 rnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2304.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2304 * x0), tmp12, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 7200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 230400 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 7200.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 230400 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 225.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = 
tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 7200.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 230400 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 7200.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: 
tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2048 x1 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r2 + 131072 * x1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 64, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 64.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x3, tmp18, None) tl.store(out_ptr0 + x3, tmp8, None) tl.store(out_ptr1 + x3, tmp13, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 3600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 16 r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 115200 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 3600.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 115200 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 3600.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4608.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4608 * x0), tmp12, rmask & xmask) @triton.jit def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = rindex // 16 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = 
tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 64 r3 = rindex // 64 tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 2048 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 2048 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 4096.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 
512 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 4096.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask) @triton.jit def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = xindex // 4096 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 65536 * x1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 16, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 16.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x3, tmp18, None) tl.store(out_ptr0 + x3, tmp8, None) tl.store(out_ptr1 + x3, tmp13, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 9216.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 9216 * x0), tmp12, rmask & xmask) @triton.jit def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 32 r3 = rindex // 32 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 16384 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0, out_ptr1, xnumel, 
rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 128 r3 = rindex // 128 tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 2048.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y1 = yindex // 16 y0 = yindex % 16 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + (32 * y1 + x2 // 128), ymask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * y1 + x2 // 128), ymask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1, 1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp17, ymask) @triton.jit def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = yindex // 4096 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (y0 + 4096 * x2 + 65536 * y1), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121) = args args.clear() assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256,), (1,)) assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_6, (1024,), (1,)) assert_size_stride(primals_7, (1024,), (1,)) assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (256,), (1,)) assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_12, (256,), (1,)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_15, (1024,), (1,)) assert_size_stride(primals_16, (1024,), (1,)) assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_18, (256,), (1,)) assert_size_stride(primals_19, (256,), (1,)) assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_21, (256,), (1,)) assert_size_stride(primals_22, (256,), (1,)) assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_24, (1024,), (1,)) assert_size_stride(primals_25, (1024,), (1,)) assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256,), (1,)) assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_30, (256,), (1,)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_33, (1024,), (1,)) assert_size_stride(primals_34, (1024,), (1,)) 
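    # Editor's note (illustrative comment, not part of the generated graph): the
    # weight shapes asserted here follow a 1x1 -> 3x3 -> 1x1 bottleneck pattern
    # (e.g. 256 -> 256 -> 1024 channels), which is consistent with a
    # pre-activation ResNet-style backbone using weight-standardized
    # convolutions and GroupNorm. The repeated
    # triton_*_add_div_sqrt_sub_var_mean_* kernels standardize each conv weight
    # over its fan-in before the convolution is launched. A minimal eager-mode
    # sketch of that step, assuming per-output-channel statistics and the
    # eps=1e-05 value that appears in those kernels, would be:
    #
    #     def standardize_weight(w, eps=1e-05):
    #         mean = w.mean(dim=(1, 2, 3), keepdim=True)
    #         var = w.var(dim=(1, 2, 3), unbiased=False, keepdim=True)
    #         return (w - mean) / torch.sqrt(var + eps)
    #
    # The fused kernels compute the same statistics with a Welford reduction and
    # write out both sqrt(var + eps) and the standardized weight in one pass.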
assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_36, (256,), (1,)) assert_size_stride(primals_37, (256,), (1,)) assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (256,), (1,)) assert_size_stride(primals_40, (256,), (1,)) assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_42, (1024,), (1,)) assert_size_stride(primals_43, (1024,), (1,)) assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_45, (2048,), (1,)) assert_size_stride(primals_46, (2048,), (1,)) assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_48, (512,), (1,)) assert_size_stride(primals_49, (512,), (1,)) assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_51, (512,), (1,)) assert_size_stride(primals_52, (512,), (1,)) assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_54, (2048,), (1,)) assert_size_stride(primals_55, (2048,), (1,)) assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_57, (512,), (1,)) assert_size_stride(primals_58, (512,), (1,)) assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_60, (512,), (1,)) assert_size_stride(primals_61, (512,), (1,)) assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_63, (2048,), (1,)) assert_size_stride(primals_64, (2048,), (1,)) assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_66, (512,), (1,)) assert_size_stride(primals_67, (512,), (1,)) assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_69, (512,), (1,)) assert_size_stride(primals_70, (512,), (1,)) assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_72, (2048,), (1,)) assert_size_stride(primals_73, (2048,), (1,)) assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_75, (512,), (1,)) assert_size_stride(primals_76, (512,), (1,)) assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_78, (512,), (1,)) assert_size_stride(primals_79, (512,), (1,)) assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_81, (2048,), (1,)) assert_size_stride(primals_82, (2048,), (1,)) assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_84, (4096,), (1,)) assert_size_stride(primals_85, (4096,), (1,)) assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_87, (1024,), (1,)) assert_size_stride(primals_88, (1024,), (1,)) assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_90, (1024,), (1,)) assert_size_stride(primals_91, (1024,), (1,)) assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_93, (4096,), (1,)) assert_size_stride(primals_94, (4096,), (1,)) assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_96, (1024,), (1,)) assert_size_stride(primals_97, (1024,), (1,)) assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_99, (1024,), (1,)) assert_size_stride(primals_100, (1024,), (1,)) assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1)) 
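    # Editor's note (illustrative comment, not part of the generated graph): the
    # triton_red/per_fused_native_group_norm_* kernels reduce the normalization
    # statistics (mean and variance) with a Welford accumulator, and the
    # triton_poi_fused_add_native_group_norm_relu_* kernels then apply the
    # normalization, affine parameters, residual add and ReLU in a single
    # elementwise pass. A rough eager-mode equivalent of one such fused block,
    # assuming 32 groups and the eps=1e-06 seen in several of these kernels,
    # would be:
    #
    #     out = torch.relu(residual +
    #         torch.nn.functional.group_norm(x, 32, weight, bias, eps=1e-06))
    #
    # where `residual`, `x`, `weight` and `bias` stand for the corresponding
    # buf*/primals_* tensors at that point in the graph.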
assert_size_stride(primals_102, (4096,), (1,)) assert_size_stride(primals_103, (4096,), (1,)) assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_105, (1024,), (1,)) assert_size_stride(primals_106, (1024,), (1,)) assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_108, (1024,), (1,)) assert_size_stride(primals_109, (1024,), (1,)) assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_111, (4096,), (1,)) assert_size_stride(primals_112, (4096,), (1,)) assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_114, (1024,), (1,)) assert_size_stride(primals_115, (1024,), (1,)) assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_117, (1024,), (1,)) assert_size_stride(primals_118, (1024,), (1,)) assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_120, (4096,), (1,)) assert_size_stride(primals_121, (4096,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_11, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_11 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_20, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_29, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_29 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_38, buf5, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_38 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_50, buf6, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_50 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_59, buf7, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_59 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_68, buf8, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_68 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_77, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_77 buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_89, buf10, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_89 buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 
9)](primals_98, buf11, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_98 buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_107, buf12, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_107 buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_116, buf13, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_116 buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf15 buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) triton_per_fused_add_div_sqrt_sub_var_mean_5[grid(256)](buf17, buf0, buf18, 256, 147, XBLOCK=1, num_warps=2, num_stages=1) buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_6[grid(128)](buf19, buf20, buf21, buf23, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256), torch.float32) triton_poi_fused_native_group_norm_relu_7[grid(1048576)](buf19, buf20, buf21, primals_3, primals_4, buf24, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_4 buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf24, buf25, buf26, 230400, XBLOCK=512, num_warps=8, num_stages=1) buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf28 buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf30, primals_5, buf31, 1024, 256, num_warps=2, num_stages=1) buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) triton_per_fused_native_group_norm_10[grid(4096)](buf32, buf33, buf34, buf36, 4096, 225, XBLOCK=1, num_warps=2, num_stages=1) buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf38 buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(256)](buf40, primals_8, buf41, 256, 256, num_warps=2, num_stages=1) buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 256, 15, 15), 
(57600, 1, 3840, 256)) buf43 = buf21 del buf21 buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf42, buf43, buf44, buf46, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf42, buf43, buf44, primals_9, primals_10, buf47, 230400, XBLOCK=1024, num_warps=4, num_stages=1) del primals_10 buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf49 buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf51, buf2, buf52, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf54 = buf44 del buf44 buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf53, buf54, buf55, buf57, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf53, buf54, buf55, primals_12, primals_13, buf58, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_13 buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf60 buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf62, primals_14, buf63, 1024, 256, num_warps=2, num_stages=1) buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf65 = buf55 del buf55 buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_15[grid(128)](buf64, buf65, buf66, buf68, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) buf70 = buf69 del buf69 triton_poi_fused_add_native_group_norm_relu_16[grid(921600)](buf70, buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66, primals_15, primals_16, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_16 del primals_7 buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf72 buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf74, primals_17, buf75, 256, 1024, num_warps=8, num_stages=1) buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf77 = buf66 del buf66 buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf76, buf77, buf78, buf80, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf76, buf77, buf78, primals_18, primals_19, buf81, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_19 buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf83 buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf85, buf3, buf86, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf88 = buf78 del buf78 buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_12[grid(128)](buf87, buf88, buf89, buf91, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf87, buf88, buf89, primals_21, primals_22, buf92, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_22 buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf94 buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf96, primals_23, buf97, 1024, 256, num_warps=2, num_stages=1) buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf99 = buf89 del buf89 buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf98, buf99, buf100, buf102, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf70, buf98, buf99, buf100, primals_24, primals_25, buf103, 921600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf105 buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf107, primals_26, buf108, 256, 1024, num_warps=8, num_stages=1) buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf110 = buf100 del buf100 buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf109, buf110, buf111, buf113, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf109, buf110, buf111, primals_27, primals_28, buf114, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_28 buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf116 buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf118, buf4, buf119, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf121 = buf111 del buf111 buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), 
torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf120, buf121, buf122, buf124, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf120, buf121, buf122, primals_30, primals_31, buf125, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_31 buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf127 buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf129, primals_32, buf130, 1024, 256, num_warps=2, num_stages=1) buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf132 = buf122 del buf122 buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf131, buf132, buf133, buf135, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf103, buf131, buf132, buf133, primals_33, primals_34, buf136, 921600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_34 buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf138 buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf140, primals_35, buf141, 256, 1024, num_warps=8, num_stages=1) buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf143 = buf133 del buf133 buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf142, buf143, buf144, buf146, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf142, buf143, buf144, primals_36, primals_37, buf147, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_37 buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf149 buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf151, buf5, buf152, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf154 = buf144 del buf144 buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf157 = 
empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf153, buf154, buf155, buf157, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf153, buf154, buf155, primals_39, primals_40, buf158, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_40 buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf160 buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf162, primals_41, buf163, 1024, 256, num_warps=2, num_stages=1) buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf165 = buf155 del buf155 buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf164, buf165, buf166, buf168, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf136, buf164, buf165, buf166, primals_42, primals_43, buf169, 921600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_43 buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf171 buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf173, primals_44, buf174, 2048, 1024, num_warps=8, num_stages=1) buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) triton_per_fused_native_group_norm_20[grid(8192)](buf175, buf176, buf177, buf179, 8192, 64, XBLOCK=8, num_warps=4, num_stages=1) buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf181 buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_21[grid(512)](buf183, primals_47, buf184, 512, 1024, num_warps=8, num_stages=1) buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512)) buf186 = buf166 del buf166 buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_22[grid(128)](buf185, buf186, buf187, buf189, 128, 3600, XBLOCK=1, 
RBLOCK=2048, num_warps=16, num_stages=1) buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512 ), torch.float32) triton_poi_fused_native_group_norm_relu_23[grid(460800)](buf185, buf186, buf187, primals_48, primals_49, buf190, 460800, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_49 buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf192 buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf194, buf6, buf195, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf197 = buf187 del buf187 buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf196, buf197, buf198, buf200, 128, 1024, num_warps=8, num_stages=1) buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf196, buf197, buf198, primals_51, primals_52, buf201, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_52 buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf203 buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf205, primals_53, buf206, 2048, 512, num_warps=4, num_stages=1) buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf208 = buf198 del buf198 buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf207, buf208, buf209, buf211, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) buf213 = buf212 del buf212 triton_poi_fused_add_native_group_norm_relu_29[grid(524288)](buf213, buf175, buf176, buf177, primals_45, primals_46, buf207, buf208, buf209, primals_54, primals_55, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf177 del primals_46 del primals_55 buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf215 buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf217, primals_56, buf218, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf220 = buf209 del buf209 buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf219, buf220, buf221, buf223, 128, 1024, num_warps=8, num_stages=1) buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf219, buf220, buf221, primals_57, primals_58, buf224, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_58 buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf226 buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf228, buf7, buf229, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf231 = buf221 del buf221 buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf230, buf231, buf232, buf234, 128, 1024, num_warps=8, num_stages=1) buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf230, buf231, buf232, primals_60, primals_61, buf235, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_61 buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf237 buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf239, primals_62, buf240, 2048, 512, num_warps=4, num_stages=1) buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf242 = buf232 del buf232 buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf241, buf242, buf243, buf245, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf213, buf241, buf242, buf243, primals_63, primals_64, buf246, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_64 buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf248 buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf250, primals_65, buf251, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf253 = buf243 del buf243 buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf252, buf253, buf254, buf256, 128, 1024, num_warps=8, num_stages=1) buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf252, buf253, buf254, primals_66, primals_67, buf257, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_67 buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf259 buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf261, buf8, buf262, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf264 = buf254 del buf254 buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf263, buf264, buf265, buf267, 128, 1024, num_warps=8, num_stages=1) buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf263, buf264, buf265, primals_69, primals_70, buf268, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_70 buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf270 buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf272, primals_71, buf273, 2048, 512, num_warps=4, num_stages=1) buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf275 = buf265 del buf265 buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf274, buf275, buf276, buf278, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf279 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf246, buf274, buf275, buf276, primals_72, primals_73, buf279, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_73 buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf281 buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf283, primals_74, buf284, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf286 = buf276 del buf276 buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf285, buf286, buf287, buf289, 128, 1024, num_warps=8, num_stages=1) buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf285, buf286, buf287, primals_75, primals_76, buf290, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_76 buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf292 buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf294, buf9, buf295, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf297 = buf287 del buf287 buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf296, buf297, buf298, buf300, 128, 1024, num_warps=8, num_stages=1) buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf296, buf297, buf298, primals_78, primals_79, buf301, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_79 buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf303 buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf305, primals_80, buf306, 2048, 512, num_warps=4, num_stages=1) buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf308 = buf298 del buf298 buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf307, buf308, buf309, buf311, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf279, buf307, buf308, buf309, primals_81, primals_82, buf312, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_82 buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096, 4096), 0) del buf34 buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf314 buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf316, primals_83, buf317, 4096, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384 ), torch.float32) buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 
16384 ), torch.float32) buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384 ), torch.float32) triton_per_fused_native_group_norm_33[grid(16384)](buf318, buf319, buf320, buf322, 16384, 16, XBLOCK=32, num_warps=4, num_stages=1) buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf324 buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_34[grid(1024)](buf326, primals_86, buf327, 1024, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024)) buf329 = buf309 del buf309 buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_35[grid(128)](buf328, buf329, buf330, buf332, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_36[grid(262144)](buf328, buf329, buf330, primals_87, primals_88, buf333, 262144, XBLOCK= 512, num_warps=8, num_stages=1) del primals_88 buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf335 buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf337, buf10, buf338, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf340 = buf330 del buf330 buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf339, buf340, buf341, buf343, 128, 512, num_warps=4, num_stages=1) buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf339, buf340, buf341, primals_90, primals_91, buf344, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_91 buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf346 buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf348, primals_92, buf349, 4096, 1024, num_warps=8, num_stages=1) buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf351 = buf341 del buf341 buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf350, buf351, buf352, buf354, 128, 2048, XBLOCK=1, RBLOCK=2048, 
num_warps=16, num_stages=1) buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) buf356 = buf355 del buf355 triton_poi_fused_add_native_group_norm_relu_42[grid(262144)](buf356, buf318, buf319, buf320, primals_84, primals_85, buf350, buf351, buf352, primals_93, primals_94, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf320 del primals_85 del primals_94 buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf358 buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf360, primals_95, buf361, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf363 = buf352 del buf352 buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf362, buf363, buf364, buf366, 128, 512, num_warps=4, num_stages=1) buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf362, buf363, buf364, primals_96, primals_97, buf367, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_97 buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf369 buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf371, buf11, buf372, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf374 = buf364 del buf364 buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf373, buf374, buf375, buf377, 128, 512, num_warps=4, num_stages=1) buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf373, buf374, buf375, primals_99, primals_100, buf378, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_100 buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf380 buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf382, primals_101, buf383, 4096, 1024, num_warps=8, num_stages=1) buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf385 = buf375 del buf375 buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch 
.float32) triton_red_fused_native_group_norm_41[grid(128)](buf384, buf385, buf386, buf388, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf356, buf384, buf385, buf386, primals_102, primals_103, buf389, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_103 buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf391 buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf393, primals_104, buf394, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf396 = buf386 del buf386 buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf395, buf396, buf397, buf399, 128, 512, num_warps=4, num_stages=1) buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf395, buf396, buf397, primals_105, primals_106, buf400, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_106 buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf402 buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf404, buf12, buf405, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf407 = buf397 del buf397 buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf406, buf407, buf408, buf410, 128, 512, num_warps=4, num_stages=1) buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf406, buf407, buf408, primals_108, primals_109, buf411, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_109 buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf413 buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf415, primals_110, buf416, 4096, 1024, num_warps=8, num_stages=1) buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf418 = buf408 del buf408 buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf421 = empty_strided_cuda((4, 
32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf417, buf418, buf419, buf421, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf389, buf417, buf418, buf419, primals_111, primals_112, buf422, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_112 buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf424 buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf426, primals_113, buf427, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf429 = buf419 del buf419 buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf428, buf429, buf430, buf432, 128, 512, num_warps=4, num_stages=1) buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf428, buf429, buf430, primals_114, primals_115, buf433, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_115 buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf435 buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf437, buf13, buf438, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf440 = buf430 del buf430 buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf439, buf440, buf441, buf443, 128, 512, num_warps=4, num_stages=1) buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf439, buf440, buf441, primals_117, primals_118, buf444, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_118 buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf446 buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf448, primals_119, buf449, 4096, 1024, num_warps=8, num_stages=1) buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf451 = buf441 del buf441 buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch 
.float32) buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf450, buf451, buf452, buf454, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1), torch.float32) triton_poi_fused_add_native_group_norm_relu_45[grid(64, 4096)](buf422, buf450, buf451, buf452, primals_120, primals_121, buf455, 64, 4096, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del buf452 del primals_121 buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.bool) triton_poi_fused_threshold_backward_46[grid(16384, 16)](buf455, buf456, 16384, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8, primals_9, buf2, primals_12, primals_14, primals_15, primals_17, primals_18, buf3, primals_21, primals_23, primals_24, primals_26, primals_27, buf4, primals_30, primals_32, primals_33, primals_35, primals_36, buf5, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, buf6, primals_51, primals_53, primals_54, primals_56, primals_57, buf7, primals_60, primals_62, primals_63, primals_65, primals_66, buf8, primals_69, primals_71, primals_72, primals_74, primals_75, buf9, primals_78, primals_80, primals_81, primals_83, primals_84, primals_86, primals_87, buf10, primals_90, primals_92, primals_93, primals_95, primals_96, buf11, primals_99, primals_101, primals_102, primals_104, primals_105, buf12, primals_108, primals_110, primals_111, primals_113, primals_114, buf13, primals_117, primals_119, primals_120, buf17, buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0), reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26, buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1), 0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40, buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0), reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52, buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0), reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63, buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0), reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75, buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0), reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86, buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0), reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97, buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0), reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107, buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0), reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118, buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0), reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129, buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0), reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140, buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0), reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151, buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0), reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162, buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0), reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173, buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0), reinterpret_tensor(buf179, (4, 
2048), (2048, 1), 0), buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0), reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 0), reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250, buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0), reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261, buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0), reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272, buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0), reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283, buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0), reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294, buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0), reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305, buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0), reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316, buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0), reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327, buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0), reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337, buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0), reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348, buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0), reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360, buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0), reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371, buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0), reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382, buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0), reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393, buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0), reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404, buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0), reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415, buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0), reinterpret_tensor(buf421, (4, 32), (32, 1), 0), buf422, buf426, buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0), reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437, buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0), reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448, buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0), reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456) def conv1x1(cin, cout, stride=1, bias=False): return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias) def conv3x3(cin, cout, stride=1, groups=1, bias=False): return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups) def 
np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class StdConv2d(nn.Conv2d): def forward(self, x): w = self.weight v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) w = (w - m) / torch.sqrt(v + 1e-05) return F.conv2d(x, w, self.bias, self.stride, self.padding, self. dilation, self.groups) class PreActBottleneck(nn.Module): """Pre-activation (v2) bottleneck block. """ def __init__(self, cin, cout=None, cmid=None, stride=1): super().__init__() cout = cout or cin cmid = cmid or cout // 4 self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06) self.conv1 = conv1x1(cin, cmid, bias=False) self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06) self.conv2 = conv3x3(cmid, cmid, stride, bias=False) self.gn3 = nn.GroupNorm(32, cout, eps=1e-06) self.conv3 = conv1x1(cmid, cout, bias=False) self.relu = nn.ReLU(inplace=True) if stride != 1 or cin != cout: self.downsample = conv1x1(cin, cout, stride, bias=False) self.gn_proj = nn.GroupNorm(cout, cout) def forward(self, x): residual = x if hasattr(self, 'downsample'): residual = self.downsample(x) residual = self.gn_proj(residual) y = self.relu(self.gn1(self.conv1(x))) y = self.relu(self.gn2(self.conv2(y))) y = self.gn3(self.conv3(y)) y = self.relu(residual + y) return y def load_from(self, weights, n_block, n_unit): conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel') ], conv=True) conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel') ], conv=True) conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel') ], conv=True) gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')]) gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')]) gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')]) gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')]) gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')]) gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')]) self.conv1.weight.copy_(conv1_weight) self.conv2.weight.copy_(conv2_weight) self.conv3.weight.copy_(conv3_weight) self.gn1.weight.copy_(gn1_weight.view(-1)) self.gn1.bias.copy_(gn1_bias.view(-1)) self.gn2.weight.copy_(gn2_weight.view(-1)) self.gn2.bias.copy_(gn2_bias.view(-1)) self.gn3.weight.copy_(gn3_weight.view(-1)) self.gn3.bias.copy_(gn3_bias.view(-1)) if hasattr(self, 'downsample'): proj_conv_weight = np2th(weights[pjoin(n_block, n_unit, 'conv_proj/kernel')], conv=True) proj_gn_weight = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/scale')]) proj_gn_bias = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/bias')]) self.downsample.weight.copy_(proj_conv_weight) self.gn_proj.weight.copy_(proj_gn_weight.view(-1)) self.gn_proj.bias.copy_(proj_gn_bias.view(-1)) class ResNetV2New(nn.Module): """Implementation of Pre-activation (v2) ResNet mode.""" def __init__(self, block_units, width_factor): super().__init__() width = int(64 * width_factor) self.width = width self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn. 
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True )), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))])) self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential( OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width * 4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 4, cout=width * 4, cmid=width)) for i in range(2, block_units[0 ] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1', PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2, stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8, cout=width * 8, cmid=width * 2)) for i in range(2, block_units[ 1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1', PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4, stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16, cout=width * 16, cmid=width * 4)) for i in range(2, block_units [2] + 1)])))])) def forward(self, input_0): primals_1 = self.root.conv.weight primals_3 = self.root.gn.weight primals_4 = self.root.gn.bias primals_9 = self.body.block1.unit1.gn1.weight primals_10 = self.body.block1.unit1.gn1.bias primals_8 = self.body.block1.unit1.conv1.weight primals_12 = self.body.block1.unit1.gn2.weight primals_13 = self.body.block1.unit1.gn2.bias primals_11 = self.body.block1.unit1.conv2.weight primals_6 = self.body.block1.unit1.gn3.weight primals_7 = self.body.block1.unit1.gn3.bias primals_5 = self.body.block1.unit1.conv3.weight primals_14 = self.body.block1.unit1.downsample.weight primals_15 = self.body.block1.unit1.gn_proj.weight primals_16 = self.body.block1.unit1.gn_proj.bias primals_18 = self.body.block1.unit2.gn1.weight primals_19 = self.body.block1.unit2.gn1.bias primals_17 = self.body.block1.unit2.conv1.weight primals_21 = self.body.block1.unit2.gn2.weight primals_22 = self.body.block1.unit2.gn2.bias primals_20 = self.body.block1.unit2.conv2.weight primals_24 = self.body.block1.unit2.gn3.weight primals_25 = self.body.block1.unit2.gn3.bias primals_23 = self.body.block1.unit2.conv3.weight primals_27 = self.body.block1.unit3.gn1.weight primals_28 = self.body.block1.unit3.gn1.bias primals_26 = self.body.block1.unit3.conv1.weight primals_30 = self.body.block1.unit3.gn2.weight primals_31 = self.body.block1.unit3.gn2.bias primals_29 = self.body.block1.unit3.conv2.weight primals_33 = self.body.block1.unit3.gn3.weight primals_34 = self.body.block1.unit3.gn3.bias primals_32 = self.body.block1.unit3.conv3.weight primals_36 = self.body.block1.unit4.gn1.weight primals_37 = self.body.block1.unit4.gn1.bias primals_35 = self.body.block1.unit4.conv1.weight primals_39 = self.body.block1.unit4.gn2.weight primals_40 = self.body.block1.unit4.gn2.bias primals_38 = self.body.block1.unit4.conv2.weight primals_42 = self.body.block1.unit4.gn3.weight primals_43 = self.body.block1.unit4.gn3.bias primals_41 = self.body.block1.unit4.conv3.weight primals_48 = self.body.block2.unit1.gn1.weight primals_49 = self.body.block2.unit1.gn1.bias primals_47 = self.body.block2.unit1.conv1.weight primals_51 = self.body.block2.unit1.gn2.weight primals_52 = self.body.block2.unit1.gn2.bias primals_50 = self.body.block2.unit1.conv2.weight primals_45 = self.body.block2.unit1.gn3.weight primals_46 = self.body.block2.unit1.gn3.bias primals_53 = self.body.block2.unit1.conv3.weight primals_44 = self.body.block2.unit1.downsample.weight primals_54 = self.body.block2.unit1.gn_proj.weight primals_55 = self.body.block2.unit1.gn_proj.bias primals_57 = self.body.block2.unit2.gn1.weight primals_58 = self.body.block2.unit2.gn1.bias primals_56 = 
self.body.block2.unit2.conv1.weight primals_60 = self.body.block2.unit2.gn2.weight primals_61 = self.body.block2.unit2.gn2.bias primals_59 = self.body.block2.unit2.conv2.weight primals_63 = self.body.block2.unit2.gn3.weight primals_64 = self.body.block2.unit2.gn3.bias primals_62 = self.body.block2.unit2.conv3.weight primals_66 = self.body.block2.unit3.gn1.weight primals_67 = self.body.block2.unit3.gn1.bias primals_65 = self.body.block2.unit3.conv1.weight primals_69 = self.body.block2.unit3.gn2.weight primals_70 = self.body.block2.unit3.gn2.bias primals_68 = self.body.block2.unit3.conv2.weight primals_72 = self.body.block2.unit3.gn3.weight primals_73 = self.body.block2.unit3.gn3.bias primals_71 = self.body.block2.unit3.conv3.weight primals_75 = self.body.block2.unit4.gn1.weight primals_76 = self.body.block2.unit4.gn1.bias primals_74 = self.body.block2.unit4.conv1.weight primals_78 = self.body.block2.unit4.gn2.weight primals_79 = self.body.block2.unit4.gn2.bias primals_77 = self.body.block2.unit4.conv2.weight primals_81 = self.body.block2.unit4.gn3.weight primals_82 = self.body.block2.unit4.gn3.bias primals_80 = self.body.block2.unit4.conv3.weight primals_87 = self.body.block3.unit1.gn1.weight primals_88 = self.body.block3.unit1.gn1.bias primals_86 = self.body.block3.unit1.conv1.weight primals_90 = self.body.block3.unit1.gn2.weight primals_91 = self.body.block3.unit1.gn2.bias primals_89 = self.body.block3.unit1.conv2.weight primals_84 = self.body.block3.unit1.gn3.weight primals_85 = self.body.block3.unit1.gn3.bias primals_92 = self.body.block3.unit1.conv3.weight primals_83 = self.body.block3.unit1.downsample.weight primals_93 = self.body.block3.unit1.gn_proj.weight primals_94 = self.body.block3.unit1.gn_proj.bias primals_96 = self.body.block3.unit2.gn1.weight primals_97 = self.body.block3.unit2.gn1.bias primals_95 = self.body.block3.unit2.conv1.weight primals_99 = self.body.block3.unit2.gn2.weight primals_100 = self.body.block3.unit2.gn2.bias primals_98 = self.body.block3.unit2.conv2.weight primals_102 = self.body.block3.unit2.gn3.weight primals_103 = self.body.block3.unit2.gn3.bias primals_101 = self.body.block3.unit2.conv3.weight primals_105 = self.body.block3.unit3.gn1.weight primals_106 = self.body.block3.unit3.gn1.bias primals_104 = self.body.block3.unit3.conv1.weight primals_108 = self.body.block3.unit3.gn2.weight primals_109 = self.body.block3.unit3.gn2.bias primals_107 = self.body.block3.unit3.conv2.weight primals_111 = self.body.block3.unit3.gn3.weight primals_112 = self.body.block3.unit3.gn3.bias primals_110 = self.body.block3.unit3.conv3.weight primals_114 = self.body.block3.unit4.gn1.weight primals_115 = self.body.block3.unit4.gn1.bias primals_113 = self.body.block3.unit4.conv1.weight primals_117 = self.body.block3.unit4.gn2.weight primals_118 = self.body.block3.unit4.gn2.bias primals_116 = self.body.block3.unit4.conv2.weight primals_120 = self.body.block3.unit4.gn3.weight primals_121 = self.body.block3.unit4.gn3.bias primals_119 = self.body.block3.unit4.conv3.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, 
primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121]) return output[0]
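A note on this record: StdConv2d in the code above standardizes its weight per output channel before convolving, which is what the fused add/div/sqrt/sub/var_mean kernels reproduce. The sketch below mirrors that transform with plain torch ops; the 8x4x3x3 weight and 2x4x16x16 input are illustrative toy shapes, not values taken from the record.

import torch
import torch.nn.functional as F

# Toy weight: 8 output channels, 4 input channels, 3x3 kernel (illustrative only).
w = torch.randn(8, 4, 3, 3)

# Per-output-channel mean/variance over (in_channels, kH, kW), as in StdConv2d.forward.
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w_std = (w - m) / torch.sqrt(v + 1e-05)

# Each output channel of the standardized weight now has ~zero mean and ~unit variance.
print(w_std.mean(dim=[1, 2, 3]))                   # close to 0
print(w_std.var(dim=[1, 2, 3], unbiased=False))    # close to 1

# The standardized weight is then used in an ordinary convolution.
x = torch.randn(2, 4, 16, 16)
y = F.conv2d(x, w_std, bias=None, stride=1, padding=1)
print(y.shape)                                     # torch.Size([2, 8, 16, 16])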
HodaEb/ViT-pytorch
ResNetV2
false
3009
[ "MIT" ]
0
2643740b1d846ae666635bb0f5a71bceba208675
https://github.com/HodaEb/ViT-pytorch/tree/2643740b1d846ae666635bb0f5a71bceba208675
TransformerLayer
import torch
import numpy as np
import torch.nn as nn
import torch.distributions


class MultiHeadAttention(nn.Module):

    def __init__(self, d_model, n_heads, kq_same=False, bias=True):
        super().__init__()
        """
        It has projection layer for getting keys, queries and values.
        Followed by attention.
        """
        self.d_model = d_model
        self.h = n_heads
        self.d_k = self.d_model // self.h
        self.kq_same = kq_same
        if not kq_same:
            self.q_linear = nn.Linear(d_model, d_model, bias=bias)
        self.k_linear = nn.Linear(d_model, d_model, bias=bias)
        self.v_linear = nn.Linear(d_model, d_model, bias=bias)

    def head_split(self, x):
        new_x_shape = x.size()[:-1] + (self.h, self.d_k)
        return x.view(*new_x_shape).transpose(-2, -3)

    def forward(self, q, k, v, mask=None):
        origin_shape = q.size()
        if not self.kq_same:
            q = self.head_split(self.q_linear(q))
        else:
            q = self.head_split(self.k_linear(q))
        k = self.head_split(self.k_linear(k))
        v = self.head_split(self.v_linear(v))
        output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask)
        output = output.transpose(-2, -3).reshape(origin_shape)
        return output

    @staticmethod
    def scaled_dot_product_attention(q, k, v, d_k, mask=None):
        """
        This is called by Multi-head attention object to find the values.
        """
        scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -np.inf)
        scores = (scores - scores.max()).softmax(dim=-1)
        scores = scores.masked_fill(torch.isnan(scores), 0)
        output = torch.matmul(scores, v)
        return output


class TransformerLayer(nn.Module):

    def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
        super().__init__()
        """
        This is a Basic Block of Transformer.
        It contains one Multi-head attention object.
        Followed by layer norm and position wise feedforward net and dropout layer.
        """
        self.masked_attn_head = MultiHeadAttention(d_model, n_heads,
            kq_same=kq_same)
        self.layer_norm1 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.linear1 = nn.Linear(d_model, d_ff)
        self.linear2 = nn.Linear(d_ff, d_model)
        self.layer_norm2 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, seq, mask=None):
        context = self.masked_attn_head(seq, seq, seq, mask)
        context = self.layer_norm1(self.dropout1(context) + seq)
        output = self.linear1(context).relu()
        output = self.linear2(output)
        output = self.layer_norm2(self.dropout2(output) + context)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_ff': 4, 'n_heads': 4, 'dropout': 0.5}]
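head_split above folds the feature dimension into (n_heads, d_k) and moves the head axis in front of the sequence axis; forward() later undoes the transpose and flattens the heads back. A small standalone shape check, using assumed toy sizes (batch=2, seq=3, d_model=8, n_heads=4, chosen for readability rather than taken from get_init_inputs):

import torch

batch, seq, d_model, n_heads = 2, 3, 8, 4   # illustrative sizes only
d_k = d_model // n_heads                     # 2

x = torch.rand(batch, seq, d_model)
split = x.view(batch, seq, n_heads, d_k).transpose(-2, -3)
print(split.shape)                           # torch.Size([2, 4, 3, 2]): (batch, heads, seq, d_k)

# Undoing the transpose and flattening the heads recovers the original layout,
# which is what MultiHeadAttention.forward does after attention.
merged = split.transpose(-2, -3).reshape(batch, seq, d_model)
print(torch.equal(merged, x))                # True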
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused_div_max_1(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused__softmax_div_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp5 = tmp2 - tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp7 - tmp4 tmp9 = triton_helpers.maximum(tmp5, tmp8) tmp11 = tmp10 * tmp1 tmp12 = tmp11 - tmp4 tmp13 = triton_helpers.maximum(tmp9, tmp12) tmp15 = tmp14 * tmp1 tmp16 = tmp15 - tmp4 tmp17 = triton_helpers.maximum(tmp13, tmp16) tmp18 = tmp5 - tmp17 tmp19 = tl_math.exp(tmp18) tmp20 = tmp8 - tmp17 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp12 - tmp17 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp16 - tmp17 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tl.store(out_ptr0 + x0, tmp17, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused__softmax_div_isnan_masked_fill_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr2 + x1, xmask, 
eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp5 = tmp2 - tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = libdevice.isnan(tmp10).to(tl.int1) tmp12 = 0.0 tmp13 = tl.where(tmp11, tmp12, tmp10) tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, 
xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](buf0, primals_3, buf3, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(64, 4)](buf1, primals_5, buf4, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((), (), torch.float32) triton_per_fused_div_max_1[grid(1)](buf5, buf6, 1, 1024, num_warps= 8, num_stages=1) buf7 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0 ) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) triton_poi_fused__softmax_div_sub_2[grid(256)](buf5, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_div_isnan_masked_fill_sub_3[grid(1024)](buf5, buf6, buf7, buf8, buf9, 1024, XBLOCK=128, num_warps=4, num_stages=1 ) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(64, 4)](buf2, primals_7, buf10, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (64, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 1), 
(16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(64)](buf11, primals_1, buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 triton_poi_fused_add_native_layer_norm_5[grid(64, 4)](buf11, primals_1, buf12, buf13, primals_8, primals_9, buf14, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_9 buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf14, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf15 buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_6[grid(256)](buf16, primals_11, buf22, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf17 triton_poi_fused_add_7[grid(256)](buf18, primals_13, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf19 = buf13 del buf13 buf20 = buf12 del buf12 triton_poi_fused_native_layer_norm_8[grid(64)](buf18, buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(256)](buf18, buf19, buf20, primals_14, primals_15, buf21, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf19 del buf20 del primals_15 return (buf21, primals_1, primals_8, primals_14, buf5, buf6, buf11, reinterpret_tensor(buf14, (64, 4), (4, 1), 0), reinterpret_tensor( buf16, (64, 4), (4, 1), 0), buf18, primals_12, buf22, primals_10, reinterpret_tensor(buf9, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf10, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0)) class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads, kq_same=False, bias=True): super().__init__() """ It has projection layer for getting keys, queries and values. Followed by attention. """ self.d_model = d_model self.h = n_heads self.d_k = self.d_model // self.h self.kq_same = kq_same if not kq_same: self.q_linear = nn.Linear(d_model, d_model, bias=bias) self.k_linear = nn.Linear(d_model, d_model, bias=bias) self.v_linear = nn.Linear(d_model, d_model, bias=bias) def head_split(self, x): new_x_shape = x.size()[:-1] + (self.h, self.d_k) return x.view(*new_x_shape).transpose(-2, -3) def forward(self, q, k, v, mask=None): origin_shape = q.size() if not self.kq_same: q = self.head_split(self.q_linear(q)) else: q = self.head_split(self.k_linear(q)) k = self.head_split(self.k_linear(k)) v = self.head_split(self.v_linear(v)) output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask) output = output.transpose(-2, -3).reshape(origin_shape) return output @staticmethod def scaled_dot_product_attention(q, k, v, d_k, mask=None): """ This is called by Multi-head attention object to find the values. 
""" scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5 if mask is not None: scores = scores.masked_fill(mask == 0, -np.inf) scores = (scores - scores.max()).softmax(dim=-1) scores = scores.masked_fill(torch.isnan(scores), 0) output = torch.matmul(scores, v) return output class TransformerLayerNew(nn.Module): def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False): super().__init__() """ This is a Basic Block of Transformer. It contains one Multi-head attention object. Followed by layer norm and position wise feedforward net and dropout layer. """ self.masked_attn_head = MultiHeadAttention(d_model, n_heads, kq_same=kq_same) self.layer_norm1 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.linear1 = nn.Linear(d_model, d_ff) self.linear2 = nn.Linear(d_ff, d_model) self.layer_norm2 = nn.LayerNorm(d_model) self.dropout2 = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.masked_attn_head.q_linear.weight primals_3 = self.masked_attn_head.q_linear.bias primals_4 = self.masked_attn_head.k_linear.weight primals_5 = self.masked_attn_head.k_linear.bias primals_6 = self.masked_attn_head.v_linear.weight primals_7 = self.masked_attn_head.v_linear.bias primals_8 = self.layer_norm1.weight primals_9 = self.layer_norm1.bias primals_10 = self.linear1.weight primals_11 = self.linear1.bias primals_12 = self.linear2.weight primals_13 = self.linear2.bias primals_14 = self.layer_norm2.weight primals_15 = self.layer_norm2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
Yingting-dev/ReChorus
TransformerLayer
false
3010
[ "MIT" ]
0
a16bc1e42f3e90e889133d7476c52ada44db573b
https://github.com/Yingting-dev/ReChorus/tree/a16bc1e42f3e90e889133d7476c52ada44db573b
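For the TransformerLayer record above, one hedged way to sanity-check the generated wrapper against the eager module is to run both in eval mode with shared weights. This is a sketch, not part of the record: it assumes TransformerLayer and TransformerLayerNew are importable from the record's code and that a CUDA device is available, since call() launches Triton kernels.

import torch

# from this_record import TransformerLayer, TransformerLayerNew   # hypothetical import path
torch.manual_seed(0)
ref = TransformerLayer(d_model=4, d_ff=4, n_heads=4, dropout=0.5).cuda().eval()
fused = TransformerLayerNew(d_model=4, d_ff=4, n_heads=4, dropout=0.5).cuda().eval()
fused.load_state_dict(ref.state_dict())   # submodule names match, so weights copy directly

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    out_ref = ref(x)
    out_fused = fused(x)

# eval() disables dropout in the eager module, and the generated graph never applies it,
# so the two outputs are expected to agree up to floating-point tolerance.
print(torch.allclose(out_ref, out_fused, atol=1e-5))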
Discriminator
import torch
import torch.nn as nn


class Discriminator(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
        self.activation1 = nn.Tanh()
        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv2 = nn.Conv2d(64, 128, kernel_size=(5, 5))
        self.activation2 = nn.Tanh()
        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2))
        self.fc1 = nn.Linear(128 * 5 * 5, 1024)
        self.activation3 = nn.Tanh()
        self.fc2 = nn.Linear(1024, 512)
        self.activation4 = nn.Tanh()
        self.fc3 = nn.Linear(512, 1)
        self.activation5 = nn.Sigmoid()

    def forward(self, x):
        x = x.view(-1, 1, 28, 28)
        x = self.conv1(x)
        x = self.activation1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.activation2(x)
        x = self.maxpool2(x)
        x = x.view(-1, 128 * 5 * 5)
        x = self.fc1(x)
        x = self.activation3(x)
        x = self.fc2(x)
        x = self.activation4(x)
        x = self.fc3(x)
        x = self.activation5(x)
        return x


def get_inputs():
    return [torch.rand([4, 1, 28, 28])]


def get_init_inputs():
    return [[], {}]
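The fc1 input size 128 * 5 * 5 follows from standard convolution/pooling arithmetic on the 28x28 input that forward() reshapes to. A quick check, pure arithmetic, assuming nothing beyond the layer hyperparameters above:

# Spatial-size bookkeeping for the discriminator above.
def conv_out(size, kernel, stride=1, padding=0):
    return (size + 2 * padding - kernel) // stride + 1

s = 28                      # input reshaped to (N, 1, 28, 28)
s = conv_out(s, 5, 1, 2)    # conv1: 5x5, padding 2 -> 28
s = s // 2                  # maxpool 2x2 -> 14
s = conv_out(s, 5, 1, 0)    # conv2: 5x5, no padding -> 10
s = s // 2                  # maxpool 2x2 -> 5
print(s, 128 * s * s)       # 5 3200, matching nn.Linear(128 * 5 * 5, 1024)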
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_tanh_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 784 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 784 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(out_ptr0 + (y0 + 64 * x2 + 50176 * y1), tmp3, xmask & ymask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 14 x2 = xindex // 896 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3584 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3584 * x2), xmask) tmp3 = tl.load(in_ptr0 + (1792 + x0 + 128 * x1 + 3584 * x2), xmask) tmp5 = tl.load(in_ptr0 + (1856 + x0 + 128 * x1 + 3584 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 100 xnumel = 128 yoffset = tl.program_id(1) 
* YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 5 y1 = yindex // 5 y5 = yindex y4 = yindex // 25 y6 = yindex % 25 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1280 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1408 + x2 + 256 * y0 + 2560 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 25 * x2 + 3200 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_tanh_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_tanh_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (64, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (1024, 3200), (3200, 1)) assert_size_stride(primals_7, (1024,), (1,)) assert_size_stride(primals_8, (512, 1024), (1024, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (1, 512), (512, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(8192, 25)](primals_4, buf0, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf1 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 28, 28), (50176, 784, 28, 1)) buf2 = empty_strided_cuda((4, 64, 28, 28), (50176, 1, 1792, 64), torch.float32) triton_poi_fused_convolution_tanh_1[grid(256, 784)](buf1, primals_3, buf2, 256, 784, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf1 del primals_3 buf3 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32) buf4 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_2[grid(50176)](buf2, buf3, buf4, 50176, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 10, 10), (12800, 1, 1280, 128)) buf6 = buf5 del buf5 triton_poi_fused_convolution_tanh_3[grid(51200)](buf6, primals_5, 51200, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128), torch.int8) buf8 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch. 
float32) triton_poi_fused_max_pool2d_with_indices_4[grid(100, 128)](buf6, buf7, buf8, 100, 128, XBLOCK=128, YBLOCK=2, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (4, 3200), (3200, 1), 0), reinterpret_tensor(primals_6, (3200, 1024), (1, 3200), 0), out=buf9 ) buf10 = buf9 del buf9 triton_poi_fused_tanh_5[grid(4096)](buf10, primals_7, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf11 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (1024, 512), (1, 1024), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_tanh_6[grid(2048)](buf12, primals_9, 2048, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_10, (512, 1), ( 1, 512), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_sigmoid_7[grid(4)](buf14, primals_11, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_11 return (buf14, primals_2, buf0, primals_1, buf2, buf3, buf4, buf6, buf7, reinterpret_tensor(buf8, (4, 3200), (3200, 1), 0), buf10, buf12, buf14, primals_10, primals_8, primals_6) class DiscriminatorNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2) self.activation1 = nn.Tanh() self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv2 = nn.Conv2d(64, 128, kernel_size=(5, 5)) self.activation2 = nn.Tanh() self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2)) self.fc1 = nn.Linear(128 * 5 * 5, 1024) self.activation3 = nn.Tanh() self.fc2 = nn.Linear(1024, 512) self.activation4 = nn.Tanh() self.fc3 = nn.Linear(512, 1) self.activation5 = nn.Sigmoid() def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Ziems/pytorch-dcgan
Discriminator
false
3011
[ "MIT" ]
0
1a251a330b9b0df6061a10463bce8057f1230797
https://github.com/Ziems/pytorch-dcgan/tree/1a251a330b9b0df6061a10463bce8057f1230797
SelfAttention
import math
import torch
import torch.nn as nn
from torch.autograd import *


class SelfAttention(nn.Module):
    dim_in: 'int'
    dim_k: 'int'
    dim_v: 'int'

    def __init__(self, dim_in, dim_k, dim_v):
        super(SelfAttention, self).__init__()
        self.dim_in = dim_in
        self.dim_k = dim_k
        self.dim_v = dim_v
        self.linear_q = nn.Linear(dim_in, dim_k, bias=False)
        self.linear_k = nn.Linear(dim_in, dim_k, bias=False)
        self.linear_v = nn.Linear(dim_in, dim_v, bias=False)
        self._norm_fact = 1 / math.sqrt(dim_k)

    def forward(self, x):
        _batch, _n, dim_in = x.shape
        assert dim_in == self.dim_in
        q = self.linear_q(x)
        k = self.linear_k(x)
        v = self.linear_v(x)
        dist = torch.bmm(q, k.transpose(1, 2)) * self._norm_fact
        dist = torch.softmax(dist, dim=-1)
        att = torch.bmm(dist, v)
        return att


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_in': 4, 'dim_k': 4, 'dim_v': 4}]
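The attention weights dist computed in forward() are row-stochastic after the softmax. A minimal usage sketch, assuming the SelfAttention class above is importable and using the toy sizes from get_inputs()/get_init_inputs():

import torch

# from this_record import SelfAttention   # hypothetical import; the class is defined above
torch.manual_seed(0)
attn = SelfAttention(dim_in=4, dim_k=4, dim_v=4)

x = torch.rand(4, 4, 4)           # (batch, n, dim_in), as in get_inputs()
out = attn(x)
print(out.shape)                  # torch.Size([4, 4, 4]): (batch, n, dim_v)

# Recomputing the attention weights shows every row sums to 1 after the softmax.
q, k = attn.linear_q(x), attn.linear_k(x)
dist = torch.softmax(torch.bmm(q, k.transpose(1, 2)) * attn._norm_fact, dim=-1)
print(dist.sum(dim=-1))           # all entries ~1.0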
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del 
buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf6) return buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) class SelfAttentionNew(nn.Module): dim_in: 'int' dim_k: 'int' dim_v: 'int' def __init__(self, dim_in, dim_k, dim_v): super(SelfAttentionNew, self).__init__() self.dim_in = dim_in self.dim_k = dim_k self.dim_v = dim_v self.linear_q = nn.Linear(dim_in, dim_k, bias=False) self.linear_k = nn.Linear(dim_in, dim_k, bias=False) self.linear_v = nn.Linear(dim_in, dim_v, bias=False) self._norm_fact = 1 / math.sqrt(dim_k) def forward(self, input_0): primals_2 = self.linear_q.weight primals_3 = self.linear_k.weight primals_4 = self.linear_v.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
YapingZ/News-image-caption
SelfAttention
false
3,012
[ "Apache-2.0" ]
0
fcccf51bbe5607adbf71c1da8ecdc6693555993f
https://github.com/YapingZ/News-image-caption/tree/fcccf51bbe5607adbf71c1da8ecdc6693555993f
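In the SelfAttention record above, the pair of fused kernels (triton_poi_fused__softmax_0 and _1) fold the 1/sqrt(dim_k) scale of the module into a numerically stable two-stage softmax, which is easy to miss in the generated code. A minimal sketch, assuming dim_k == 4 (so _norm_fact == 0.5), of what the two stages compute in plain PyTorch:

import torch

def two_stage_softmax(scores: torch.Tensor, norm_fact: float = 0.5) -> torch.Tensor:
    # stage 0 (triton_poi_fused__softmax_0): shift by the row max, apply the scale,
    # exponentiate -- exp(norm_fact * (x - max)) is the stable numerator
    numer = ((scores - scores.max(dim=-1, keepdim=True).values) * norm_fact).exp()
    # stage 1 (triton_poi_fused__softmax_1): normalize by the row sum
    return numer / numer.sum(dim=-1, keepdim=True)

# up to rounding this equals torch.softmax(scores * norm_fact, dim=-1),
# i.e. the dist = softmax(bmm(q, k.T) * self._norm_fact) line of the original module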
BasicBlock
import torch import torch.utils.data import torch.nn as nn def conv3x3(in_planes, out_planes, dilation=1, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, dilation=1, stride=1, downsample=None ): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, dilation, stride) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.relu(out) out = self.conv2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf3, primals_1, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, primals_3, buf1, buf4 def conv3x3(in_planes, out_planes, dilation=1, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False) class BasicBlockNew(nn.Module): expansion = 1 def __init__(self, inplanes, planes, dilation=1, stride=1, downsample=None ): super(BasicBlockNew, self).__init__() self.conv1 = conv3x3(inplanes, planes, dilation, stride) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.downsample = downsample self.stride = stride def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ZurMaD/DAIN
BasicBlock
false
3,013
[ "MIT" ]
0
22570e51e84f7dfd48ba4f88e6ee7c9ff1b0b123
https://github.com/ZurMaD/DAIN/tree/22570e51e84f7dfd48ba4f88e6ee7c9ff1b0b123
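In the BasicBlock record above, triton_poi_fused_add_relu_threshold_backward_1 does double duty: it writes relu(out + residual) back into the second convolution's buffer and also stores a boolean mask (result <= 0, buf4) that the ReLU backward pass uses to gate gradients. A minimal sketch of that per-element contract, assuming plain PyTorch tensors:

import torch

def fused_add_relu_with_mask(out: torch.Tensor, residual: torch.Tensor):
    y = torch.relu(out + residual)  # forward value written in place of the conv output
    mask = y <= 0.0                 # saved so backward can zero gradients where ReLU clipped
    return y, mask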
SimpleNet
import torch import torch.nn as nn import torch.utils.data import torch.nn.functional as F class SimpleNet(nn.Module): def __init__(self): super(SimpleNet, self).__init__() self.fc1 = nn.Linear(12288, 84) self.fc2 = nn.Linear(84, 50) self.fc3 = nn.Linear(50, 2) def forward(self, x): x = x.view(-1, 12288) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 12288])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 12288), (12288, 1)) assert_size_stride(primals_2, (84, 12288), (12288, 1)) assert_size_stride(primals_3, (84,), (1,)) assert_size_stride(primals_4, (50, 84), (84, 1)) assert_size_stride(primals_5, (50,), (1,)) assert_size_stride(primals_6, (2, 50), (50, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (12288, 84), (1, 12288), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(336)](buf1, primals_3, 336, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (84, 50), (1, 84), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(200)](buf3, primals_5, 200, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (50, 2), (1, 50), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, primals_1, buf1, buf3, primals_6, primals_4 class SimpleNetNew(nn.Module): def __init__(self): super(SimpleNetNew, self).__init__() self.fc1 = nn.Linear(12288, 84) self.fc2 = nn.Linear(84, 50) self.fc3 = nn.Linear(50, 2) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
aakgun/pytorch-VideoDataset
SimpleNet
false
3,014
[ "MIT" ]
0
619e385f37b99bfabca0b814673825ed902242b2
https://github.com/aakgun/pytorch-VideoDataset/tree/619e385f37b99bfabca0b814673825ed902242b2
WineLoss
import torch import torch.nn as nn class WineLoss(nn.Module): def __init__(self): super(WineLoss, self).__init__() self.smoothl1 = nn.SmoothL1Loss() def forward(self, pred, label): loss = self.smoothl1(pred, label) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class WineLossNew(nn.Module): def __init__(self): super(WineLossNew, self).__init__() self.smoothl1 = nn.SmoothL1Loss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ZhongyuanW/foundation_wines
WineLoss
false
3,015
[ "Apache-2.0" ]
0
92b9ae0ece46ecd291a9101ebef9e9421ead92d6
https://github.com/ZhongyuanW/foundation_wines/tree/92b9ae0ece46ecd291a9101ebef9e9421ead92d6
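The reduction kernel in the WineLoss record (triton_per_fused_smooth_l1_loss_0) spells out the Smooth L1 branches that nn.SmoothL1Loss normally hides. A minimal restatement, assuming the default beta of 1.0 and mean reduction over all 256 elements:

import torch
import torch.nn.functional as F

def smooth_l1(pred: torch.Tensor, label: torch.Tensor, beta: float = 1.0) -> torch.Tensor:
    diff = (pred - label).abs()
    # |d| < beta: 0.5 * d**2 / beta, otherwise |d| - 0.5 * beta (tmp5..tmp11 in the kernel)
    elementwise = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
    return elementwise.mean()

pred, label = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert torch.allclose(smooth_l1(pred, label), F.smooth_l1_loss(pred, label), atol=1e-6)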
SA
import torch import torch.nn as nn class SA(nn.Module): def __init__(self, kernel_size=7): super(SA, self).__init__() assert kernel_size in (3, 7) padding = 3 if kernel_size == 7 else 1 self.conv1 = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=padding) self.sigmoid = nn.Sigmoid() def forward(self, x): inputs = x avg_pool = torch.mean(x, dim=1, keepdim=True) max_pool, _ = torch.max(x, dim=1, keepdim=True) x = torch.cat([avg_pool, max_pool], dim=1) x = self.conv1(x) return inputs * self.sigmoid(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = triton_helpers.maximum(tmp21, tmp22) tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp16, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp15, tmp27) tl.store(out_ptr0 + x3, tmp28, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, buf0, buf2 class SANew(nn.Module): def __init__(self, kernel_size=7): super(SANew, self).__init__() assert kernel_size in (3, 7) padding = 3 if kernel_size == 7 else 1 self.conv1 = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=padding) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ZhijieXiao-0624/CNXA
SA
false
3,016
[ "MIT" ]
0
a63b3561010cf87f696a005f8ea252e7cdaa7ca2
https://github.com/ZhijieXiao-0624/CNXA/tree/a63b3561010cf87f696a005f8ea252e7cdaa7ca2
GELU
import torch import torch.nn as nn import torch.nn.functional as F class GELU(nn.Module): def forward(self, input): return F.gelu(input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GELUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ag8/mrl
GELU
false
3,017
[ "MIT" ]
0
f05b00347f88020cbeb216c7e4764a4d2523b67e
https://github.com/ag8/mrl/tree/f05b00347f88020cbeb216c7e4764a4d2523b67e
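The GELU kernel above (triton_poi_fused_gelu_0) uses the exact erf formulation rather than a tanh approximation: 0.5 * x * (1 + erf(x / sqrt(2))), with 0.7071067811865476 being 1/sqrt(2). A minimal sketch checking that against F.gelu, assuming float32 inputs:

import torch
import torch.nn.functional as F

def gelu_erf(x: torch.Tensor) -> torch.Tensor:
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))  # exact (erf-based) GELU

x = torch.randn(4, 4, 4, 4)
assert torch.allclose(gelu_erf(x), F.gelu(x), atol=1e-6)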
TransformerDecoderLayer
import torch from typing import Optional from torch import nn def _get_activation_fn(activation: 'str'): if activation == 'relu': return nn.functional.relu elif activation == 'gelu': return nn.functional.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerDecoderLayer(nn.Module): """ Modified from torch.nn.TransformerDecoderLayer. Add support of normalize_before, i.e., use layer_norm before the first block. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). Examples:: >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8) >>> memory = torch.rand(10, 32, 512) >>> tgt = torch.rand(20, 32, 512) >>> out = decoder_layer(tgt, memory) """ def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int' =2048, dropout: 'float'=0.1, activation: 'str'='relu', normalize_before: 'bool'=True) ->None: super(TransformerDecoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.src_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.norm3 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def __setstate__(self, state): if 'activation' not in state: state['activation'] = nn.functional.relu super(TransformerDecoderLayer, self).__setstate__(state) def forward(self, tgt: 'torch.Tensor', memory: 'torch.Tensor', tgt_mask: 'Optional[torch.Tensor]'=None, memory_mask: 'Optional[torch.Tensor]'=None, tgt_key_padding_mask: 'Optional[torch.Tensor]'=None, memory_key_padding_mask: 'Optional[torch.Tensor]'=None) ->torch.Tensor: """Pass the inputs (and mask) through the decoder layer. Args: tgt: the sequence to the decoder layer (required). memory: the sequence from the last layer of the encoder (required). tgt_mask: the mask for the tgt sequence (optional). memory_mask: the mask for the memory sequence (optional). tgt_key_padding_mask: the mask for the tgt keys per batch (optional). memory_key_padding_mask: the mask for the memory keys per batch (optional). Shape: tgt: (T, N, E). memory: (S, N, E). tgt_mask: (T, T). memory_mask: (T, S). tgt_key_padding_mask: (N, T). memory_key_padding_mask: (N, S). 
S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number """ residual = tgt if self.normalize_before: tgt = self.norm1(tgt) tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0] tgt = residual + self.dropout1(tgt2) if not self.normalize_before: tgt = self.norm1(tgt) residual = tgt if self.normalize_before: tgt = self.norm2(tgt) tgt2 = self.src_attn(tgt, memory, memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0] tgt = residual + self.dropout2(tgt2) if not self.normalize_before: tgt = self.norm2(tgt) residual = tgt if self.normalize_before: tgt = self.norm3(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = residual + self.dropout3(tgt2) if not self.normalize_before: tgt = self.norm3(tgt) return tgt def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def 
triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (12, 4), (4, 1)) assert_size_stride(primals_12, (12,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (2048, 4), (4, 1)) assert_size_stride(primals_18, (2048,), (1,)) assert_size_stride(primals_19, (4, 2048), (2048, 1)) assert_size_stride(primals_20, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha= 1, beta=1, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha= 1, beta=1, out=buf5) buf6 = 
reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0) del buf3 triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_7 buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12, buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12, buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_12, (4,), (1,), 4), primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_12, (4,), (1,), 8), primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf18) buf19 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0) del buf16 triton_poi_fused_mul_2[grid(16)](buf19, primals_12, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_12 buf20 = buf8 del buf8 extern_kernels.bmm(buf19, reinterpret_tensor(buf17, (4, 1, 4), (1, 1, 4), 0), out=buf20) buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf20, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1) buf22 = buf20 del buf20 triton_poi_fused__softmax_4[grid(64)](buf21, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf22, reinterpret_tensor(buf18, (4, 4, 1), (1, 4, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf23, buf24, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0) del buf23 extern_kernels.mm(reinterpret_tensor(buf24, (4, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf25) buf26 = buf25 del buf25 triton_poi_fused_add_8[grid(16)](buf26, primals_1, buf12, primals_14, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_14 buf27 = buf14 del buf14 buf28 = buf13 del buf13 triton_poi_fused_native_layer_norm_0[grid(4)](buf26, buf27, buf28, 4, XBLOCK=4, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((4, 4), (4, 1), 
torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf26, buf27, buf28, primals_15, primals_16, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf27 del buf28 del primals_16 buf30 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf29, reinterpret_tensor(primals_17, (4, 2048), (1, 4), 0), out=buf30) buf31 = buf30 del buf30 triton_poi_fused_relu_9[grid(8192)](buf31, primals_18, 8192, XBLOCK =128, num_warps=4, num_stages=1) del primals_18 buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf31, reinterpret_tensor(primals_19, (2048, 4), (1, 2048), 0), out=buf32) buf33 = buf32 del buf32 triton_poi_fused_add_10[grid(16)](buf33, buf26, primals_20, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_20 return (buf33, primals_1, primals_8, primals_15, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, primals_10, buf22, reinterpret_tensor(buf24, (4, 4), (4, 1), 0), buf26, buf29, buf31, primals_19, primals_17, primals_13, reinterpret_tensor(buf18, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf19, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0)) def _get_activation_fn(activation: 'str'): if activation == 'relu': return nn.functional.relu elif activation == 'gelu': return nn.functional.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerDecoderLayerNew(nn.Module): """ Modified from torch.nn.TransformerDecoderLayer. Add support of normalize_before, i.e., use layer_norm before the first block. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). 
Examples:: >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8) >>> memory = torch.rand(10, 32, 512) >>> tgt = torch.rand(20, 32, 512) >>> out = decoder_layer(tgt, memory) """ def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int' =2048, dropout: 'float'=0.1, activation: 'str'='relu', normalize_before: 'bool'=True) ->None: super(TransformerDecoderLayerNew, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.src_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.norm3 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def __setstate__(self, state): if 'activation' not in state: state['activation'] = nn.functional.relu super(TransformerDecoderLayerNew, self).__setstate__(state) def forward(self, input_0, input_1): primals_4 = self.self_attn.in_proj_weight primals_5 = self.self_attn.in_proj_bias primals_1 = self.self_attn.out_proj.weight primals_2 = self.self_attn.out_proj.bias primals_11 = self.src_attn.in_proj_weight primals_12 = self.src_attn.in_proj_bias primals_6 = self.src_attn.out_proj.weight primals_3 = self.src_attn.out_proj.bias primals_17 = self.linear1.weight primals_18 = self.linear1.bias primals_19 = self.linear2.weight primals_7 = self.linear2.bias primals_8 = self.norm1.weight primals_9 = self.norm1.bias primals_14 = self.norm2.weight primals_15 = self.norm2.bias primals_16 = self.norm3.weight primals_20 = self.norm3.bias primals_10 = input_0 primals_13 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0]
aarora8/icefall
TransformerDecoderLayer
false
3,018
[ "Apache-2.0" ]
0
8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a
https://github.com/aarora8/icefall/tree/8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a
SE
import torch import torch.nn as nn import torch.nn.functional as F def swish(x): return x * x.sigmoid() class SE(nn.Module): """Squeeze-and-Excitation block with Swish.""" def __init__(self, in_planes, se_planes): super(SE, self).__init__() self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True) self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True) def forward(self, x): out = F.adaptive_avg_pool2d(x, (1, 1)) out = swish(self.se1(out)) out = self.se2(out).sigmoid() out = x * out return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'se_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_convolution_mul_sigmoid_1[grid(16)](buf3, primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf5 = 
extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf7, primals_1, primals_2, primals_4, buf1, buf3, buf4, buf6 def swish(x): return x * x.sigmoid() class SENew(nn.Module): """Squeeze-and-Excitation block with Swish.""" def __init__(self, in_planes, se_planes): super(SENew, self).__init__() self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True) self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True) def forward(self, input_0): primals_2 = self.se1.weight primals_3 = self.se1.bias primals_4 = self.se2.weight primals_5 = self.se2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
adarsh-kr/CVModelsBenchmark
SE
false
3,019
[ "MIT" ]
0
85aeb76c7c796d86baa97e1272aea83d734665b5
https://github.com/adarsh-kr/CVModelsBenchmark/tree/85aeb76c7c796d86baa97e1272aea83d734665b5
TransformerEncoderLayer
import torch from typing import Optional from torch import nn def _get_activation_fn(activation: 'str'): if activation == 'relu': return nn.functional.relu elif activation == 'gelu': return nn.functional.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerEncoderLayer(nn.Module): """ Modified from torch.nn.TransformerEncoderLayer. Add support of normalize_before, i.e., use layer_norm before the first block. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). normalize_before: whether to use layer_norm before the first block. Examples:: >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) >>> src = torch.rand(10, 32, 512) >>> out = encoder_layer(src) """ def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int' =2048, dropout: 'float'=0.1, activation: 'str'='relu', normalize_before: 'bool'=True) ->None: super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def __setstate__(self, state): if 'activation' not in state: state['activation'] = nn.functional.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src: 'torch.Tensor', src_mask: 'Optional[torch.Tensor]'=None, src_key_padding_mask: 'Optional[torch.Tensor]'=None) ->torch.Tensor: """ Pass the input through the encoder layer. Args: src: the sequence to the encoder layer (required). src_mask: the mask for the src sequence (optional). src_key_padding_mask: the mask for the src keys per batch (optional) Shape: src: (S, N, E). src_mask: (S, S). src_key_padding_mask: (N, S). S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number """ residual = src if self.normalize_before: src = self.norm1(src) src2 = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] src = residual + self.dropout1(src2) if not self.normalize_before: src = self.norm1(src) residual = src if self.normalize_before: src = self.norm2(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = residual + self.dropout2(src2) if not self.normalize_before: src = self.norm2(src) return src def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def 
triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (2048, 4), (4, 1)) assert_size_stride(primals_11, (2048,), (1,)) assert_size_stride(primals_12, (4, 2048), (2048, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha= 1, beta=1, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha= 1, beta=1, out=buf5) buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0) del buf3 triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf8 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, 
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_7 buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12, buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12, buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf13 del buf14 del primals_9 buf16 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 2048), (1, 4), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_8[grid(8192)](buf17, primals_11, 8192, XBLOCK =256, num_warps=4, num_stages=1) del primals_11 buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (2048, 4), (1, 2048), 0), out=buf18) buf19 = buf18 del buf18 triton_poi_fused_add_9[grid(16)](buf19, primals_1, buf12, primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_13 return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor( buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12, primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4 ), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0)) def _get_activation_fn(activation: 'str'): if activation == 'relu': return nn.functional.relu elif activation == 'gelu': return nn.functional.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class TransformerEncoderLayerNew(nn.Module): """ Modified from torch.nn.TransformerEncoderLayer. Add support of normalize_before, i.e., use layer_norm before the first block. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). normalize_before: whether to use layer_norm before the first block. 
Examples:: >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) >>> src = torch.rand(10, 32, 512) >>> out = encoder_layer(src) """ def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int' =2048, dropout: 'float'=0.1, activation: 'str'='relu', normalize_before: 'bool'=True) ->None: super(TransformerEncoderLayerNew, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def __setstate__(self, state): if 'activation' not in state: state['activation'] = nn.functional.relu super(TransformerEncoderLayerNew, self).__setstate__(state) def forward(self, input_0): primals_4 = self.self_attn.in_proj_weight primals_5 = self.self_attn.in_proj_bias primals_1 = self.self_attn.out_proj.weight primals_2 = self.self_attn.out_proj.bias primals_10 = self.linear1.weight primals_11 = self.linear1.bias primals_12 = self.linear2.weight primals_3 = self.linear2.bias primals_7 = self.norm1.weight primals_8 = self.norm1.bias primals_9 = self.norm2.weight primals_13 = self.norm2.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
aarora8/icefall
TransformerEncoderLayer
false
3,020
[ "Apache-2.0" ]
0
8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a
https://github.com/aarora8/icefall/tree/8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a
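The TransformerEncoderLayer record above pairs a pre-norm (normalize_before=True) encoder layer with its inductor-generated call() graph. Below is a minimal eager-mode sketch of the same computation order — LayerNorm, self-attention, residual, LayerNorm, feed-forward, residual — using stand-alone modules chosen here purely for illustration; it does not reproduce the fused Triton kernels, only the math they implement.

import torch
import torch.nn as nn
import torch.nn.functional as F

d_model, nhead, dim_feedforward = 512, 8, 2048
norm1, norm2 = nn.LayerNorm(d_model), nn.LayerNorm(d_model)
attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
linear1 = nn.Linear(d_model, dim_feedforward)
linear2 = nn.Linear(dim_feedforward, d_model)

src = torch.rand(10, 32, d_model)           # (seq_len, batch, d_model), as in the docstring example
x = norm1(src)                              # normalize_before=True: norm comes first
attn_out, _ = attn(x, x, x)
src = src + attn_out                        # first residual connection
x = norm2(src)
src = src + linear2(F.relu(linear1(x)))     # second residual connection
print(src.shape)                            # torch.Size([10, 32, 512])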
SimpleFC
import torch import torch.nn as nn import torch.onnx class SimpleFC(nn.Module): def __init__(self, input_size, num_classes, name='SimpleFC'): super(SimpleFC, self).__init__() self.FC = nn.Parameter(torch.randn([input_size, num_classes])) self.FCbias = nn.Parameter(torch.randn([num_classes])) def forward(self, input): return torch.matmul(input, self.FC) + self.FCbias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf1, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class SimpleFCNew(nn.Module): def __init__(self, input_size, num_classes, name='SimpleFC'): super(SimpleFCNew, self).__init__() self.FC = nn.Parameter(torch.randn([input_size, num_classes])) self.FCbias = nn.Parameter(torch.randn([num_classes])) def forward(self, input_0): primals_1 = self.FC primals_3 = self.FCbias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
adityakusupati/EdgeML
SimpleFC
false
3,021
[ "MIT" ]
0
65933a6fdfc38945f4311043a62e120784b2b0bf
https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf
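A quick eager-mode check of what the SimpleFC record computes: the inductor graph views the 4D input as (64, 4) for the extern mm call and then broadcasts the bias in the fused add kernel. This sketch only mirrors that arithmetic, not the call() path itself.

import torch

torch.manual_seed(0)
W = torch.randn(4, 4)
b = torch.randn(4)
x = torch.rand(4, 4, 4, 4)

eager = torch.matmul(x, W) + b                                   # what SimpleFC.forward does
flattened = (x.reshape(64, 4) @ W).reshape(4, 4, 4, 4) + b       # the flattened form used by the compiled graph
print(torch.allclose(eager, flattened))                          # True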
DummyAttention
import torch import torch.nn as nn import torch.nn.functional as F class DummyAttention(nn.Module): def __init__(self, in_channels): super().__init__() self.linear1 = nn.Linear(in_channels, 32) self.linear2 = nn.Linear(32, 1) pass def forward(self, net): value = net net = self.linear1(net) net = F.relu(net) net = self.linear2(net) net = torch.flatten(net, 1) net = F.softmax(net, dim=1) net = torch.unsqueeze(net, dim=1) net = torch.matmul(net, value) net = torch.flatten(net, 1) return net pass def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (32, 4), (4, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (1, 32), (32, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 32), (128, 32, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, primals_3, buf7, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) 
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0 ), primals_1, out=buf6) del buf5 return reinterpret_tensor(buf6, (4, 4), (4, 1), 0 ), primals_1, reinterpret_tensor(buf1, (16, 32), (32, 1), 0 ), buf3, primals_4, buf7 class DummyAttentionNew(nn.Module): def __init__(self, in_channels): super().__init__() self.linear1 = nn.Linear(in_channels, 32) self.linear2 = nn.Linear(32, 1) pass pass def forward(self, input_0): primals_2 = self.linear1.weight primals_3 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
aaalgo/health_outcome_challenge
DummyAttention
false
3,022
[ "BSD-3-Clause" ]
0
6d0cb660123ac6fb4939c1d50a8f1914e8e3e165
https://github.com/aaalgo/health_outcome_challenge/tree/6d0cb660123ac6fb4939c1d50a8f1914e8e3e165
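An eager sketch of the attention pooling in the DummyAttention record: a two-layer MLP produces one score per sequence position, the scores are softmaxed over the sequence axis, and the result is used as weights for a batched matmul against the original input ("values"). Layer sizes follow the record; variable names are illustrative only.

import torch
import torch.nn as nn
import torch.nn.functional as F

linear1, linear2 = nn.Linear(4, 32), nn.Linear(32, 1)
net = torch.rand(4, 4, 4)                                    # (batch, seq, channels)
scores = linear2(F.relu(linear1(net)))                       # (4, 4, 1)
weights = F.softmax(scores.flatten(1), dim=1).unsqueeze(1)   # (4, 1, 4), rows sum to 1
pooled = torch.matmul(weights, net).flatten(1)               # (4, 4) weighted sum over the sequence
print(weights.sum(-1), pooled.shape)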
ProtoNN
import torch import numpy as np import torch.nn as nn import torch.onnx class ProtoNN(nn.Module): def __init__(self, inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma, W=None, B=None, Z=None): """ Forward computation graph for ProtoNN. inputDimension: Input data dimension or feature dimension. projectionDimension: hyperparameter numPrototypes: hyperparameter numOutputLabels: The number of output labels or classes W, B, Z: Numpy matrices that can be used to initialize projection matrix(W), prototype matrix (B) and prototype labels matrix (B). Expected Dimensions: W inputDimension (d) x projectionDimension (d_cap) B projectionDimension (d_cap) x numPrototypes (m) Z numOutputLabels (L) x numPrototypes (m) """ super(ProtoNN, self).__init__() self.__d = inputDimension self.__d_cap = projectionDimension self.__m = numPrototypes self.__L = numOutputLabels self.W, self.B, self.Z = None, None, None self.gamma = gamma self.__validInit = False self.__initWBZ(W, B, Z) self.__validateInit() def __validateInit(self): self.__validinit = False errmsg = 'Dimensions mismatch! Should be W[d, d_cap]' errmsg += ', B[d_cap, m] and Z[L, m]' d, d_cap, m, L, _ = self.getHyperParams() assert self.W.shape[0] == d, errmsg assert self.W.shape[1] == d_cap, errmsg assert self.B.shape[0] == d_cap, errmsg assert self.B.shape[1] == m, errmsg assert self.Z.shape[0] == L, errmsg assert self.Z.shape[1] == m, errmsg self.__validInit = True def __initWBZ(self, inW, inB, inZ): if inW is None: self.W = torch.randn([self.__d, self.__d_cap]) self.W = nn.Parameter(self.W) else: self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32))) if inB is None: self.B = torch.randn([self.__d_cap, self.__m]) self.B = nn.Parameter(self.B) else: self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32))) if inZ is None: self.Z = torch.randn([self.__L, self.__m]) self.Z = nn.Parameter(self.Z) else: self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32))) def getHyperParams(self): """ Returns the model hyperparameters: [inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma] """ d = self.__d dcap = self.__d_cap m = self.__m L = self.__L return d, dcap, m, L, self.gamma def getModelMatrices(self): """ Returns model matrices, which can then be evaluated to obtain corresponding numpy arrays. These can then be exported as part of other implementations of ProtonNN, for instance a C++ implementation or pure python implementation. Returns [ProjectionMatrix (W), prototypeMatrix (B), prototypeLabelsMatrix (Z), gamma] """ return self.W, self.B, self.Z, self.gamma def forward(self, X): """ This method is responsible for construction of the forward computation graph. The end point of the computation graph, or in other words the output operator for the forward computation is returned. X: Input of shape [-1, inputDimension] returns: The forward computation outputs, self.protoNNOut """ assert self.__validInit is True, 'Initialization failed!' 
W, B, Z, gamma = self.W, self.B, self.Z, self.gamma WX = torch.matmul(X, W) dim = [-1, WX.shape[1], 1] WX = torch.reshape(WX, dim) dim = [1, B.shape[0], -1] B_ = torch.reshape(B, dim) l2sim = B_ - WX l2sim = torch.pow(l2sim, 2) l2sim = torch.sum(l2sim, dim=1, keepdim=True) self.l2sim = l2sim gammal2sim = -1 * gamma * gamma * l2sim M = torch.exp(gammal2sim) dim = [1] + list(Z.shape) Z_ = torch.reshape(Z, dim) y = Z_ * M y = torch.sum(y, dim=2) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inputDimension': 4, 'projectionDimension': 4, 'numPrototypes': 4, 'numOutputLabels': 4, 'gamma': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_exp_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = -16.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp8 = tmp7 * tmp2 tmp9 = tl_math.exp(tmp8) tmp10 = tmp6 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp13 * tmp2 tmp15 = tl_math.exp(tmp14) tmp16 = tmp12 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tl_math.exp(tmp20) tmp22 = tmp18 * tmp21 tmp23 = tmp17 + tmp22 tl.store(out_ptr0 + x2, tmp23, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_pow_sub_sum_0[grid(256)](primals_2, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_exp_mul_sum_1[grid(256)](primals_3, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, buf1, primals_2, primals_3, buf0, buf1, reinterpret_tensor( primals_4, (4, 64), (1, 4), 0) class ProtoNNNew(nn.Module): def __init__(self, inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma, W=None, B=None, Z=None): """ Forward computation graph for ProtoNN. inputDimension: Input data dimension or feature dimension. projectionDimension: hyperparameter numPrototypes: hyperparameter numOutputLabels: The number of output labels or classes W, B, Z: Numpy matrices that can be used to initialize projection matrix(W), prototype matrix (B) and prototype labels matrix (B). Expected Dimensions: W inputDimension (d) x projectionDimension (d_cap) B projectionDimension (d_cap) x numPrototypes (m) Z numOutputLabels (L) x numPrototypes (m) """ super(ProtoNNNew, self).__init__() self.__d = inputDimension self.__d_cap = projectionDimension self.__m = numPrototypes self.__L = numOutputLabels self.W, self.B, self.Z = None, None, None self.gamma = gamma self.__validInit = False self.__initWBZ(W, B, Z) self.__validateInit() def __validateInit(self): self.__validinit = False errmsg = 'Dimensions mismatch! Should be W[d, d_cap]' errmsg += ', B[d_cap, m] and Z[L, m]' d, d_cap, m, L, _ = self.getHyperParams() assert self.W.shape[0] == d, errmsg assert self.W.shape[1] == d_cap, errmsg assert self.B.shape[0] == d_cap, errmsg assert self.B.shape[1] == m, errmsg assert self.Z.shape[0] == L, errmsg assert self.Z.shape[1] == m, errmsg self.__validInit = True def __initWBZ(self, inW, inB, inZ): if inW is None: self.W = torch.randn([self.__d, self.__d_cap]) self.W = nn.Parameter(self.W) else: self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32))) if inB is None: self.B = torch.randn([self.__d_cap, self.__m]) self.B = nn.Parameter(self.B) else: self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32))) if inZ is None: self.Z = torch.randn([self.__L, self.__m]) self.Z = nn.Parameter(self.Z) else: self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32))) def getHyperParams(self): """ Returns the model hyperparameters: [inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma] """ d = self.__d dcap = self.__d_cap m = self.__m L = self.__L return d, dcap, m, L, self.gamma def getModelMatrices(self): """ Returns model matrices, which can then be evaluated to obtain corresponding numpy arrays. These can then be exported as part of other implementations of ProtonNN, for instance a C++ implementation or pure python implementation. Returns [ProjectionMatrix (W), prototypeMatrix (B), prototypeLabelsMatrix (Z), gamma] """ return self.W, self.B, self.Z, self.gamma def forward(self, input_0): primals_1 = self.W primals_2 = self.B primals_3 = self.Z primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
adityakusupati/EdgeML
ProtoNN
false
3,023
[ "MIT" ]
0
65933a6fdfc38945f4311043a62e120784b2b0bf
https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf
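An eager sketch of the ProtoNN scoring in this record: project X with W, compute squared L2 distances to the prototype matrix B, apply the RBF kernel exp(-gamma^2 * d^2), and mix the prototype label matrix Z. With gamma=4, -gamma^2 = -16, which is why the fused Triton kernel bakes in the constant -16.0. Dimensions are kept tiny to match the record.

import torch

torch.manual_seed(0)
d, d_cap, m, L, gamma = 4, 4, 4, 4, 4.0
W, B, Z = torch.randn(d, d_cap), torch.randn(d_cap, m), torch.randn(L, m)
X = torch.rand(8, d)

WX = X @ W                                                # (8, d_cap) projected inputs
d2 = ((B.unsqueeze(0) - WX.unsqueeze(-1)) ** 2).sum(1)    # (8, m) squared distances to prototypes
M = torch.exp(-gamma * gamma * d2)                        # RBF similarities
y = (Z.unsqueeze(0) * M.unsqueeze(1)).sum(-1)             # (8, L) label scores
print(y.shape)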
ReferenceWeightBinarizationModule
import torch from torch import nn from torchvision import models as models import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.onnx class ReferenceDOREFABinarize(torch.autograd.Function): @staticmethod def forward(ctx, x): norm = x.abs().mean() sign = (x > 0).type(x.dtype) * 2 - 1 output_flat = sign * norm return output_flat.view_as(x) @staticmethod def backward(ctx, grad_output): return grad_output class ReferenceXNORBinarize(torch.autograd.Function): @staticmethod def forward(ctx, x): norm = x.abs().mean([1, 2, 3], keepdim=True) sign = (x > 0).type(x.dtype) * 2 - 1 output = sign * norm return output @staticmethod def backward(ctx, grad_output): return grad_output class ReferenceWeightBinarizationModule(nn.Module): def __init__(self, mode='xnor'): super().__init__() self.mode = mode if self.mode == 'xnor': self.binarize = ReferenceXNORBinarize.apply elif self.mode == 'dorefa': self.binarize = ReferenceDOREFABinarize.apply def forward(self, input_): return self.binarize(input_) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torchvision import models as models import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_abs_gt_mean_mul_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = 0.0 tmp7 = tmp0 > tmp6 tmp8 = tmp7.to(tl.float32) tmp9 = 2.0 tmp10 = tmp8 * tmp9 tmp11 = 1.0 tmp12 = tmp10 - tmp11 tmp13 = 64.0 tmp14 = tmp5 / tmp13 tmp15 = tmp12 * tmp14 tl.store(out_ptr1 + (r1 + 64 * x0), tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_abs_gt_mean_mul_sub_0[grid(4)](arg0_1, buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class ReferenceDOREFABinarize(torch.autograd.Function): @staticmethod def forward(ctx, x): norm = x.abs().mean() sign = (x > 0).type(x.dtype) * 2 - 1 output_flat = sign * norm return output_flat.view_as(x) @staticmethod def backward(ctx, grad_output): return grad_output class ReferenceXNORBinarize(torch.autograd.Function): @staticmethod def forward(ctx, x): norm = x.abs().mean([1, 2, 3], keepdim=True) sign = (x > 0).type(x.dtype) * 2 - 1 output = sign * norm return output @staticmethod def backward(ctx, grad_output): return grad_output class ReferenceWeightBinarizationModuleNew(nn.Module): def __init__(self, mode='xnor'): super().__init__() self.mode = mode if self.mode == 'xnor': self.binarize = ReferenceXNORBinarize.apply elif self.mode == 'dorefa': self.binarize = ReferenceDOREFABinarize.apply def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JinYAnGHe/openvino_training_extensions
ReferenceWeightBinarizationModule
false
3,024
[ "Apache-2.0" ]
0
a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
https://github.com/JinYAnGHe/openvino_training_extensions/tree/a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
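An eager sketch of the XNOR-style weight binarization that this record fuses into a single Triton kernel: per output channel, the weights are replaced by their sign scaled by the channel-wise mean absolute value.

import torch

torch.manual_seed(0)
w = torch.rand(4, 4, 4, 4) - 0.5
scale = w.abs().mean(dim=[1, 2, 3], keepdim=True)   # one scale per output channel
sign = (w > 0).to(w.dtype) * 2 - 1                  # +1 / -1
binarized = sign * scale
print(binarized.abs().unique())                     # only the per-channel scales remain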
StateInitZero
import torch from torch import nn from torchvision import models as models import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.onnx class StateInitZero(nn.Module): def __init__(self, hidden_size, num_layers=1, batch_first=False): super(StateInitZero, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.batch_first = batch_first def forward(self, input: 'torch.Tensor'): h0 = input.new_zeros((self.num_layers, input.size(0 if self. batch_first else 1), self.hidden_size)) c0 = input.new_zeros((self.num_layers, input.size(0 if self. batch_first else 1), self.hidden_size)) return h0, c0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torchvision import models as models import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_new_zeros_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf0, buf1 class StateInitZeroNew(nn.Module): def __init__(self, hidden_size, num_layers=1, batch_first=False): super(StateInitZeroNew, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.batch_first = batch_first def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
JinYAnGHe/openvino_training_extensions
StateInitZero
false
3,025
[ "Apache-2.0" ]
0
a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
https://github.com/JinYAnGHe/openvino_training_extensions/tree/a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
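An eager sketch of what StateInitZero provides: zero-initialized (h0, c0) tensors with the layout nn.LSTM expects, (num_layers, batch, hidden_size), where batch_first only controls which input dimension is read as the batch size. The LSTM below is added here just to show the states being consumed.

import torch
import torch.nn as nn

hidden_size, num_layers = 4, 1
x = torch.rand(5, 3, 4)                            # (seq_len, batch, features), batch_first=False
batch = x.size(1)
h0 = x.new_zeros((num_layers, batch, hidden_size))
c0 = x.new_zeros((num_layers, batch, hidden_size))
lstm = nn.LSTM(input_size=4, hidden_size=hidden_size, num_layers=num_layers)
out, (hn, cn) = lstm(x, (h0, c0))
print(out.shape, hn.shape)                         # torch.Size([5, 3, 4]) torch.Size([1, 3, 4])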
FakeReLUM
import torch import torch.nn as nn from typing import * import torch.utils.data class FakeReLU(torch.autograd.Function): @staticmethod def forward(ctx, input): return input.clamp(min=0) @staticmethod def backward(ctx, grad_output): return grad_output class FakeReLUM(nn.Module): def forward(self, x): return FakeReLU.apply(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from typing import * import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class FakeReLU(torch.autograd.Function): @staticmethod def forward(ctx, input): return input.clamp(min=0) @staticmethod def backward(ctx, grad_output): return grad_output class FakeReLUMNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
agarwalsiddhant10/blackbox-smoothing
FakeReLUM
false
3,026
[ "MIT" ]
0
cf18a9dc45f807494955d0cf19a3d1dd4315b54f
https://github.com/agarwalsiddhant10/blackbox-smoothing/tree/cf18a9dc45f807494955d0cf19a3d1dd4315b54f
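A small demonstration of the FakeReLU trick in this record: the forward pass is an ordinary clamp(min=0) (which is what the compiled kernel implements), but the backward pass returns the incoming gradient unchanged, i.e. a straight-through estimator, unlike torch.relu which zeroes gradients for negative inputs.

import torch

class FakeReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output          # pass gradients straight through

x = torch.tensor([-1.0, 2.0], requires_grad=True)
FakeReLU.apply(x).sum().backward()
print(x.grad)                       # tensor([1., 1.]) -- gradient flows through the negative entry

y = torch.tensor([-1.0, 2.0], requires_grad=True)
torch.relu(y).sum().backward()
print(y.grad)                       # tensor([0., 1.])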
Conv2dSame
import math import torch import torch.utils.data import torch import torch.nn as nn class Conv2dSame(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1): super(Conv2dSame, self).__init__() self.F = kernel_size self.S = stride self.D = dilation self.layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, dilation=dilation) def forward(self, x_in): _N, _C, H, W = x_in.shape H2 = math.ceil(H / self.S) W2 = math.ceil(W / self.S) Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))( x_in) x_relu = nn.ReLU()(x_pad) x_out = self.layer(x_relu) return x_out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_relu_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class Conv2dSameNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1): super(Conv2dSameNew, self).__init__() self.F = kernel_size self.S = stride self.D = dilation self.layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, dilation=dilation) def forward(self, input_0): primals_1 = self.layer.weight primals_3 = self.layer.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
adityamehta00/HIDeGAN
Conv2dSame
false
3,027
[ "BSD-3-Clause" ]
0
91a0674e092ccde2784a82bf927dfefd8673eb4c
https://github.com/adityamehta00/HIDeGAN/tree/91a0674e092ccde2784a82bf927dfefd8673eb4c
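A worked example of the 'same'-padding arithmetic in the Conv2dSame record: the total padding per spatial dimension is P = (ceil(H/S) - 1)*S + (F - 1)*D + 1 - H, split asymmetrically when odd, and ReLU is applied to the padded input before the convolution, matching the record. Note that nn.ZeroPad2d takes (left, right, top, bottom); with the record's square 4x4 inputs the ordering of the height/width pad amounts does not change the result.

import math
import torch
import torch.nn as nn

H = W = 4
F_, S, D = 4, 1, 1                                        # kernel_size, stride, dilation
P = (math.ceil(H / S) - 1) * S + (F_ - 1) * D + 1 - H     # = 3 total pad per spatial dim
x = torch.rand(2, 4, H, W)
x_pad = nn.ZeroPad2d((P // 2, P - P // 2, P // 2, P - P // 2))(x)   # pad 4x4 -> 7x7
y = nn.Conv2d(4, 4, F_, S, dilation=D)(torch.relu(x_pad))
print(P, y.shape)                                         # 3 torch.Size([2, 4, 4, 4])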
VAE
import torch import torch.nn as nn import torch.utils.data import torch.nn.init as init def kaiming_init(m): if isinstance(m, (nn.Linear, nn.Conv2d)): init.kaiming_normal(m.weight) if m.bias is not None: m.bias.data.fill_(0) elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)): m.weight.data.fill_(1) if m.bias is not None: m.bias.data.fill_(0) class VAE(nn.Module): def __init__(self, length, n_latent): super(VAE, self).__init__() self.length = length self.n_latent = n_latent self.C1 = nn.Conv2d(self.length, 400, kernel_size=1) self.C2 = nn.Conv2d(400, 400, kernel_size=1) self.C31 = nn.Conv2d(400, self.n_latent, kernel_size=1) self.C32 = nn.Conv2d(400, self.n_latent, kernel_size=1) self.C4 = nn.Conv2d(self.n_latent, 400, kernel_size=1) self.C5 = nn.Conv2d(400, 400, kernel_size=1) self.C6 = nn.Conv2d(400, self.length, kernel_size=1) self.relu = nn.ReLU(inplace=True) def weights_init(self): for module in self.modules(): kaiming_init(module) def encode(self, x): out = self.C1(x) out = self.relu(out) out = self.C2(out) out = self.relu(out) mu = self.C31(out) logvar = self.C32(out) return mu, logvar def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def decode(self, z): out = self.C4(z) out = self.relu(out) out = self.C5(out) out = self.relu(out) out = self.C6(out) return out def forward(self, x): mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) return self.decode(z), z, mu, logvar def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'length': 4, 'n_latent': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_exp_mul_randn_like_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp4 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x2 + 16 * y3), xmask & ymask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = 0.5 tmp8 = tmp5 * tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = tmp6 * tmp9 tmp11 = tmp2 + tmp10 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) tl.store(out_ptr1 + (x2 + 16 * y3), tmp5, xmask & ymask) tl.store(out_ptr2 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask) tl.store(out_ptr3 + (x2 + 16 * y3), tmp11, xmask & ymask) tl.store(out_ptr4 + (y0 + 4 * x2 + 64 * y1), tmp11, xmask & ymask) @triton.jit def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (400, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (400, 400, 1, 1), (400, 1, 1, 1)) assert_size_stride(primals_5, (400,), (1,)) assert_size_stride(primals_6, (4, 400, 1, 1), (400, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 400, 1, 1), (400, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (400, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_11, (400,), (1,)) assert_size_stride(primals_12, (400, 400, 1, 1), (400, 1, 1, 1)) assert_size_stride(primals_13, (400,), (1,)) assert_size_stride(primals_14, (4, 400, 1, 1), (400, 1, 1, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 400, 4, 4), (6400, 1, 1600, 400)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(25600)](buf2, primals_2, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 400, 4, 4), (6400, 1, 1600, 400)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(25600)](buf4, primals_5, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 1, 16, 4)) buf9 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch. 
float32, device=device(type='cuda', index=0), pin_memory=False) buf10 = buf9 del buf9 buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 1, 16, 4)) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_add_convolution_exp_mul_randn_like_2[grid(16, 16)]( buf5, primals_7, buf7, primals_9, buf10, buf6, buf8, buf11, buf12, buf13, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf10 del buf5 del buf7 del primals_7 del primals_9 buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 400, 4, 4), (6400, 1, 1600, 400)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_1[grid(25600)](buf15, primals_11, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf16 = extern_kernels.convolution(buf15, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 400, 4, 4), (6400, 1, 1600, 400)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_1[grid(25600)](buf17, primals_13, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 4, 4, 4), (64, 1, 16, 4)) buf19 = reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf13 triton_poi_fused_convolution_3[grid(16, 16)](buf18, primals_15, buf19, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf18 del primals_15 return (buf19, buf12, buf6, buf8, primals_1, buf0, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf2, buf4, buf8, buf11, buf12, buf15, buf17) def kaiming_init(m): if isinstance(m, (nn.Linear, nn.Conv2d)): init.kaiming_normal(m.weight) if m.bias is not None: m.bias.data.fill_(0) elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)): m.weight.data.fill_(1) if m.bias is not None: m.bias.data.fill_(0) class VAENew(nn.Module): def __init__(self, length, n_latent): super(VAENew, self).__init__() self.length = length self.n_latent = n_latent self.C1 = nn.Conv2d(self.length, 400, kernel_size=1) self.C2 = nn.Conv2d(400, 400, kernel_size=1) self.C31 = nn.Conv2d(400, self.n_latent, kernel_size=1) self.C32 = nn.Conv2d(400, self.n_latent, kernel_size=1) self.C4 = nn.Conv2d(self.n_latent, 400, kernel_size=1) self.C5 = nn.Conv2d(400, 400, kernel_size=1) self.C6 = nn.Conv2d(400, self.length, kernel_size=1) self.relu = nn.ReLU(inplace=True) def weights_init(self): for module in self.modules(): kaiming_init(module) def encode(self, x): out = self.C1(x) out = self.relu(out) out = self.C2(out) out = self.relu(out) mu = self.C31(out) logvar = self.C32(out) return mu, logvar def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def decode(self, z): out = self.C4(z) out = self.relu(out) out = self.C5(out) out = self.relu(out) out = 
self.C6(out) return out def forward(self, input_0): primals_1 = self.C1.weight primals_2 = self.C1.bias primals_4 = self.C2.weight primals_5 = self.C2.bias primals_6 = self.C31.weight primals_7 = self.C31.bias primals_8 = self.C32.weight primals_9 = self.C32.bias primals_10 = self.C4.weight primals_11 = self.C4.bias primals_12 = self.C5.weight primals_13 = self.C5.bias primals_14 = self.C6.weight primals_15 = self.C6.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0], output[1], output[2], output[3]
aasensio/neural_hinode
VAE
false
3,028
[ "MIT" ]
0
63ec076d920f82343618ce67669c73a3b5209957
https://github.com/aasensio/neural_hinode/tree/63ec076d920f82343618ce67669c73a3b5209957
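An eager sketch of the reparameterization trick that the VAE record fuses into a single Triton kernel (the add_convolution_exp_mul_randn_like kernel): z = mu + eps * exp(0.5 * logvar), with fresh Gaussian noise drawn on every forward pass. Because the encoder and decoder use only 1x1 convolutions, this sampling happens independently per spatial position.

import torch

torch.manual_seed(0)
mu = torch.rand(4, 4, 4, 4)
logvar = torch.rand(4, 4, 4, 4)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)     # fresh noise each call, so z is stochastic
z = mu + eps * std
print(z.shape)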
UGRNNLRCell
import torch import torch.nn as nn import torch.onnx def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) 
self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class UGRNNLRCell(RNNCell): """ UGRNN LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 3 matrices if not None else creates 2 matrices) uRank = rank of U matrix (creates 3 matrices if not None else creates 2 matrices) UGRNN architecture and compression techniques are found in UGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(W1x_t + U1h_{t-1} + B_g) h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='UGRNNLR'): super(UGRNNLRCell, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'UGRNNLR' def forward(self, input, state): if self._wRank is None: wComp1 = torch.matmul(input, self.W1) wComp2 = torch.matmul(input, self.W2) else: wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1) wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2) if self._uRank is None: uComp1 = torch.matmul(state, self.U1) uComp2 = torch.matmul(state, self.U2) else: uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1) uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2) pre_comp1 = wComp1 + uComp1 pre_comp2 = wComp2 + uComp2 z = gen_nonlinearity(pre_comp1 + self.bias_gate, self. _gate_nonlinearity) c = gen_nonlinearity(pre_comp2 + self.bias_update, self. 
_update_nonlinearity) new_h = z * state + (1.0 - z) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 2: Vars.extend([self.W1, self.W2]) else: Vars.extend([self.W, self.W1, self.W2]) if self._num_U_matrices == 2: Vars.extend([self.U1, self.U2]) else: Vars.extend([self.U, self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr1 + x2, xmask) tmp7 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp5 tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(in_out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr0 + x2, tmp17, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_6, out=buf3) del primals_6 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf4, buf5, buf2, primals_7, buf3, primals_8, primals_5, buf6, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_7 del primals_8 return buf6, primals_5, buf4, buf5, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) 
elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class UGRNNLRCellNew(RNNCell): """ UGRNN LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation 
functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 3 matrices if not None else creates 2 matrices) uRank = rank of U matrix (creates 3 matrices if not None else creates 2 matrices) UGRNN architecture and compression techniques are found in UGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(W1x_t + U1h_{t-1} + B_g) h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='UGRNNLR'): super(UGRNNLRCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'UGRNNLR' def getVars(self): Vars = [] if self._num_W_matrices == 2: Vars.extend([self.W1, self.W2]) else: Vars.extend([self.W, self.W1, self.W2]) if self._num_U_matrices == 2: Vars.extend([self.U1, self.U2]) else: Vars.extend([self.U, self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) return Vars def forward(self, input_0, input_1): primals_1 = self.W1 primals_3 = self.W2 primals_4 = self.U1 primals_6 = self.U2 primals_7 = self.bias_gate primals_8 = self.bias_update primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
adityakusupati/EdgeML
UGRNNLRCell
false
3,029
[ "MIT" ]
0
65933a6fdfc38945f4311043a62e120784b2b0bf
https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf
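A minimal usage sketch for the eager UGRNNLRCell entry above, assuming the class defined in the original code is importable in the current scope; shapes follow the entry's get_inputs()/get_init_inputs(), and the default sigmoid/tanh nonlinearities are used.

import torch

torch.manual_seed(0)

cell = UGRNNLRCell(input_size=4, hidden_size=4)   # full-rank W1, W2, U1, U2
x = torch.rand([4, 4, 4, 4])                      # input batch
h = torch.rand([4, 4, 4, 4])                      # previous hidden state

new_h = cell(x, h)                                # h_t = z_t*h_{t-1} + (1-z_t)*h_t^
print(new_h.shape)                                # torch.Size([4, 4, 4, 4])

# Low-rank variant: wRank/uRank factor each projection as Wi = W @ W_i and
# Ui = U @ U_i, so getVars() returns 3 + 3 + 2 tensors instead of 2 + 2 + 2.
lr_cell = UGRNNLRCell(input_size=4, hidden_size=4, wRank=2, uRank=2)
print(len(lr_cell.getVars()))                     # 8

The compiled UGRNNLRCellNew variant has the same constructor and forward signature; it simply routes the parameters and the two inputs through the generated call() function instead of the eager matmul path.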
LSTMPredictor
import torch import torch.nn as nn class LSTMPredictor(nn.Module): def __init__(self, look_back, target_days): super(LSTMPredictor, self).__init__() self.layer_a = nn.Linear(look_back, 32) self.relu = nn.ReLU() self.output = nn.Linear(32, target_days) def predict(self, input): with torch.no_grad(): return self.forward(input).tolist() def forward(self, input): logits = self.output(self.relu(self.layer_a(input))) return logits def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'look_back': 4, 'target_days': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 32), (32, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf3, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), primals_4, buf3 class LSTMPredictorNew(nn.Module): def __init__(self, look_back, target_days): super(LSTMPredictorNew, self).__init__() self.layer_a = nn.Linear(look_back, 32) self.relu = nn.ReLU() self.output = nn.Linear(32, target_days) def predict(self, input): with torch.no_grad(): return self.forward(input).tolist() def forward(self, input_0): primals_1 = self.layer_a.weight primals_2 = self.layer_a.bias primals_4 = self.output.weight primals_5 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Yu-Hao-88/NN_stock_prediction
LSTMPredictor
false
3,030
[ "MIT" ]
0
bb84a3f4450d95af317d60d83dcd53ad4f3d350d
https://github.com/Yu-Hao-88/NN_stock_prediction/tree/bb84a3f4450d95af317d60d83dcd53ad4f3d350d
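A short usage sketch for the eager LSTMPredictor entry above, assuming the class is in scope. Despite the name it is a plain two-layer MLP (Linear -> ReLU -> Linear), so the shapes below just mirror get_inputs()/get_init_inputs().

import torch

model = LSTMPredictor(look_back=4, target_days=4)
x = torch.rand([4, 4, 4, 4])          # last dim must equal look_back

logits = model(x)                     # shape [4, 4, 4, 4]; last dim == target_days
preds = model.predict(x)              # same forward pass under no_grad, returned as nested lists
print(logits.shape, type(preds))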
ReconstructionBlock
import math import torch import torch.utils.data import torch import torch.nn as nn class Conv2dSame(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1): super(Conv2dSame, self).__init__() self.F = kernel_size self.S = stride self.D = dilation self.layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, dilation=dilation) def forward(self, x_in): _N, _C, H, W = x_in.shape H2 = math.ceil(H / self.S) W2 = math.ceil(W / self.S) Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))( x_in) x_relu = nn.ReLU()(x_pad) x_out = self.layer(x_relu) return x_out class ReconstructionBlock(nn.Module): """Define a Resnet block""" def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn. BatchNorm2d, use_dropout=False): """Initialize the Resnet block A resnet block is a conv block with skip connections We construct a conv block with build_conv_block function, and implement skip connections in <forward> function. Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf """ super(ReconstructionBlock, self).__init__() self.C1R1 = Conv2dSame(input_nc, output_nc, kernel_size=1) def forward(self, input): """Forward function (with skip connections)""" return self.C1R1(input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_nc': 4, 'output_nc': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class Conv2dSame(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1): super(Conv2dSame, self).__init__() self.F = kernel_size self.S = stride self.D = dilation self.layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, dilation=dilation) def forward(self, x_in): _N, _C, H, W = x_in.shape H2 = math.ceil(H / self.S) W2 = math.ceil(W / self.S) Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))( x_in) x_relu = nn.ReLU()(x_pad) x_out = self.layer(x_relu) return x_out class ReconstructionBlockNew(nn.Module): """Define a Resnet block""" def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn. BatchNorm2d, use_dropout=False): """Initialize the Resnet block A resnet block is a conv block with skip connections We construct a conv block with build_conv_block function, and implement skip connections in <forward> function. Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf """ super(ReconstructionBlockNew, self).__init__() self.C1R1 = Conv2dSame(input_nc, output_nc, kernel_size=1) def forward(self, input_0): primals_2 = self.C1R1.layer.weight primals_3 = self.C1R1.layer.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
adityamehta00/HIDeGAN
ReconstructionBlock
false
3,031
[ "BSD-3-Clause" ]
0
91a0674e092ccde2784a82bf927dfefd8673eb4c
https://github.com/adityamehta00/HIDeGAN/tree/91a0674e092ccde2784a82bf927dfefd8673eb4c
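A minimal sketch of the TF-style 'SAME' padding arithmetic behind the ReconstructionBlock entry above, assuming both ReconstructionBlock and Conv2dSame from the original code are in scope; with stride 1 the spatial size of the output always matches the input.

import torch

block = ReconstructionBlock(input_nc=4, output_nc=4)   # internally a 1x1 Conv2dSame
x = torch.rand([4, 4, 4, 4])
print(block(x).shape)                                  # torch.Size([4, 4, 4, 4])

# For a 1x1 kernel at stride 1 the computed padding is
# Pr = Pc = (H - 1)*1 + (1 - 1)*1 + 1 - H = 0, so nothing is padded; a 3x3 kernel
# at stride 1 gives Pr = Pc = 2, i.e. one zero pixel per side before the conv.
same3 = Conv2dSame(3, 8, kernel_size=3)
y = torch.rand([2, 3, 10, 10])
print(same3(y).shape)                                  # torch.Size([2, 8, 10, 10])

Note that Conv2dSame applies ReLU to the padded input before the convolution, which is exactly the relu-then-conv ordering visible in the generated Triton kernels for this entry.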
FastRNNCell
import torch import torch.nn as nn import torch.onnx def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) 
self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastRNNCell(RNNCell): """ FastRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. alphaInit = init for alpha, the update scalar betaInit = init for beta, the weight for previous state FastRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, alphaInit=- 3.0, betaInit=3.0, name='FastRNN'): super(FastRNNCell, self).__init__(input_size, hidden_size, None, update_nonlinearity, 1, 1, 1, wRank, uRank, wSparsity, uSparsity) self._alphaInit = alphaInit self._betaInit = betaInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1])) self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastRNN' def forward(self, input, state): if self._wRank is None: wComp = torch.matmul(input, self.W) else: wComp = torch.matmul(torch.matmul(input, self.W1), self.W2) if self._uRank is None: uComp = torch.matmul(state, self.U) else: uComp = torch.matmul(torch.matmul(state, self.U1), self.U2) pre_comp = wComp + uComp c = gen_nonlinearity(pre_comp + self.bias_update, self. _update_nonlinearity) new_h = torch.sigmoid(self.beta) * state + torch.sigmoid(self.alpha ) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_update]) Vars.extend([self.alpha, self.beta]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp9 = tl.load(in_ptr3 + x2, xmask) tmp11 = tl.load(in_ptr4 + 0) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp8 = tl.sigmoid(tmp7) tmp10 = tmp8 * tmp9 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp13 * tmp5 tmp15 = tmp10 + tmp14 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_tanh_0[grid(256)](buf2, buf1, primals_5, primals_6, primals_4, primals_7, buf3, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf1 del primals_5 return buf3, primals_4, primals_6, primals_7, buf2, reinterpret_tensor( primals_2, (4, 64), (1, 4), 0) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. 
ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastRNNCellNew(RNNCell): """ FastRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two 
matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. alphaInit = init for alpha, the update scalar betaInit = init for beta, the weight for previous state FastRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, alphaInit=- 3.0, betaInit=3.0, name='FastRNN'): super(FastRNNCellNew, self).__init__(input_size, hidden_size, None, update_nonlinearity, 1, 1, 1, wRank, uRank, wSparsity, uSparsity) self._alphaInit = alphaInit self._betaInit = betaInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1])) self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastRNN' def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_update]) Vars.extend([self.alpha, self.beta]) return Vars def forward(self, input_0, input_1): primals_1 = self.W primals_3 = self.U primals_5 = self.bias_update primals_6 = self.alpha primals_7 = self.beta primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
adityakusupati/EdgeML
FastRNNCell
false
3,032
[ "MIT" ]
0
65933a6fdfc38945f4311043a62e120784b2b0bf
https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf
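A minimal usage sketch for the eager FastRNNCell entry above, assuming the class is in scope. With the default alphaInit=-3.0 and betaInit=3.0, sigmoid(alpha) starts near 0.05 and sigmoid(beta) near 0.95, so the cell initially behaves close to an identity map on the previous state plus a small candidate update.

import torch

cell = FastRNNCell(input_size=4, hidden_size=4)   # full-rank W and U
x = torch.rand([4, 4, 4, 4])
h = torch.rand([4, 4, 4, 4])

new_h = cell(x, h)                                # sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^
print(new_h.shape)                                # torch.Size([4, 4, 4, 4])
print(torch.sigmoid(cell.alpha).item(), torch.sigmoid(cell.beta).item())

# Low-rank variant: W = W1 @ W2 and U = U1 @ U2, so getVars() grows from
# 5 tensors (W, U, bias_update, alpha, beta) to 7.
lr_cell = FastRNNCell(input_size=4, hidden_size=4, wRank=2, uRank=2)
print(len(lr_cell.getVars()))                     # 7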
LocallyConnectedLayer1d
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class LocallyConnectedLayer1d(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= True, bias=False): """ Defines one locally connected layer for one dimensional vector input. NOTE: This model only takes one-dimensional inputs. Batched inputs are also fine. input_dim: column size of weight matrix output_dim: row size of weight matrix kernel_size: number of local connections per parameter stride: number of strides of local connections, CANNOT BE ZERO padding: whether or not to zero pad bias: whether or not to have a bias term """ super(LocallyConnectedLayer1d, self).__init__() initrange = 0.1 self.weight = nn.Parameter(torch.empty(output_dim, kernel_size)) torch.nn.init.uniform_(self.weight, -initrange, initrange) if bias: self.bias = nn.Parameter(torch.zeros(output_dim)) else: self.register_parameter('bias', None) self.kernel_size = kernel_size self.stride = stride if padding is True: pad_size = stride * (output_dim - 1) + kernel_size - input_dim self.input_dim = input_dim + pad_size self.pad = True else: resulting_dim = (input_dim - kernel_size) / stride + 1 self.extra_dim = output_dim - resulting_dim self.pad = False def forward(self, x): k = self.kernel_size s = self.stride instance_dim = len(x.size()) - 1 if self.pad: pad_size = self.input_dim - x.size()[instance_dim] if pad_size >= 0: pad = 0, pad_size x = F.pad(x, pad=pad) x = x.unfold(instance_dim, k, s) else: x = x.unfold(instance_dim, k, s) if instance_dim == 0: x = x[:pad_size] else: x = x[:, :pad_size, :] else: x = x.unfold(instance_dim, k, s) for i in self.extra_dim: if instance_dim == 0: x = torch.cat((x, x[-1]), dim=instance_dim) else: x = torch.cat((x, x[:, -1, :]), dim=instance_dim) out = torch.sum(x * self.weight, dim=instance_dim + 1) if self.bias is not None: out += self.bias return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4, 'stride': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 448 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = xindex // 7 x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 7 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + x0 + 7 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + x0 + 7 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + x0 + 7 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 7), (112, 28, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(448)](primals_1, buf0, 448, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_1[grid(256)](buf0, primals_2, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(buf0, (4, 4, 4, 4, 4), (112, 28, 7, 1, 1), 0) class LocallyConnectedLayer1dNew(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= True, bias=False): """ Defines one locally connected layer for one dimensional vector input. NOTE: This model only takes one-dimensional inputs. Batched inputs are also fine. 
input_dim: column size of weight matrix output_dim: row size of weight matrix kernel_size: number of local connections per parameter stride: number of strides of local connections, CANNOT BE ZERO padding: whether or not to zero pad bias: whether or not to have a bias term """ super(LocallyConnectedLayer1dNew, self).__init__() initrange = 0.1 self.weight = nn.Parameter(torch.empty(output_dim, kernel_size)) torch.nn.init.uniform_(self.weight, -initrange, initrange) if bias: self.bias = nn.Parameter(torch.zeros(output_dim)) else: self.register_parameter('bias', None) self.kernel_size = kernel_size self.stride = stride if padding is True: pad_size = stride * (output_dim - 1) + kernel_size - input_dim self.input_dim = input_dim + pad_size self.pad = True else: resulting_dim = (input_dim - kernel_size) / stride + 1 self.extra_dim = output_dim - resulting_dim self.pad = False def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
adityayedetore/LCRNN
LocallyConnectedLayer1d
false
3,033
[ "MIT" ]
0
7b6afaf6098fed584b90fe0196cfd26aa6a190c5
https://github.com/adityayedetore/LCRNN/tree/7b6afaf6098fed584b90fe0196cfd26aa6a190c5
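A minimal sketch for the LocallyConnectedLayer1d entry above, using the default padding=True path (the one the Triton lowering targets) and assuming the class is in scope; each of the output_dim units multiplies its own kernel_size-wide window of the padded input by its own weight row.

import torch

layer = LocallyConnectedLayer1d(input_dim=4, output_dim=4, kernel_size=4, stride=1)
x = torch.rand([4, 4, 4, 4])        # batched; the last dim is the "1d" input
out = layer(x)
print(out.shape)                    # torch.Size([4, 4, 4, 4])

# The layer zero-pads the last dim to stride*(output_dim - 1) + kernel_size = 7,
# unfolds it into 4 overlapping windows of length 4, multiplies each window by the
# corresponding weight row, and sums over the window dimension.
print(layer.input_dim)              # 7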
AugCNN
import torch import torch.nn as nn import torch.nn.functional as F def apply_init_(modules): """ Initialize NN modules """ for m in modules: if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) if m.bias is not None: nn.init.constant_(m.bias, 0) class Conv2d_tf(nn.Conv2d): """ Conv2d with the padding behavior from TF """ def __init__(self, *args, **kwargs): super(Conv2d_tf, self).__init__(*args, **kwargs) self.padding = kwargs.get('padding', 'SAME') def _compute_padding(self, input, dim): input_size = input.size(dim + 2) filter_size = self.weight.size(dim + 2) effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1 out_size = (input_size + self.stride[dim] - 1) // self.stride[dim] total_padding = max(0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size) additional_padding = int(total_padding % 2 != 0) return additional_padding, total_padding def forward(self, input): if self.padding == 'VALID': return F.conv2d(input, self.weight, self.bias, self.stride, padding=0, dilation=self.dilation, groups=self.groups) rows_odd, padding_rows = self._compute_padding(input, dim=0) cols_odd, padding_cols = self._compute_padding(input, dim=1) if rows_odd or cols_odd: input = F.pad(input, [0, cols_odd, 0, rows_odd]) return F.conv2d(input, self.weight, self.bias, self.stride, padding =(padding_rows // 2, padding_cols // 2), dilation=self.dilation, groups=self.groups) class AugCNN(nn.Module): """ Convolutional Neural Network used as Augmentation """ def __init__(self): super(AugCNN, self).__init__() self.aug = Conv2d_tf(3, 3, kernel_size=3) apply_init_(self.modules()) self.train() def forward(self, obs): return self.aug(obs) def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf1, primals_1, primals_2 def apply_init_(modules): """ Initialize NN modules """ for m in modules: if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) if m.bias is not None: nn.init.constant_(m.bias, 0) class Conv2d_tf(nn.Conv2d): """ Conv2d with the padding behavior from TF """ def __init__(self, *args, **kwargs): super(Conv2d_tf, self).__init__(*args, **kwargs) self.padding = kwargs.get('padding', 'SAME') def _compute_padding(self, input, dim): input_size = input.size(dim + 2) filter_size = self.weight.size(dim + 2) effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1 out_size = (input_size + self.stride[dim] - 1) // self.stride[dim] total_padding = max(0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size) additional_padding = int(total_padding % 2 != 0) return additional_padding, total_padding def forward(self, input): if self.padding == 'VALID': return F.conv2d(input, self.weight, self.bias, self.stride, padding=0, dilation=self.dilation, groups=self.groups) rows_odd, padding_rows = self._compute_padding(input, dim=0) cols_odd, padding_cols = self._compute_padding(input, dim=1) if rows_odd or cols_odd: input = F.pad(input, [0, cols_odd, 0, rows_odd]) return F.conv2d(input, self.weight, self.bias, self.stride, padding =(padding_rows // 2, padding_cols // 2), dilation=self.dilation, groups=self.groups) class AugCNNNew(nn.Module): """ Convolutional Neural Network used as Augmentation """ def __init__(self): super(AugCNNNew, self).__init__() self.aug = Conv2d_tf(3, 3, kernel_size=3) apply_init_(self.modules()) self.train() def forward(self, input_0): primals_2 = self.aug.weight primals_3 = self.aug.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
agarwl/auto-drac
AugCNN
false
3,034
[ "MIT" ]
0
d86c480b51929e6e4ec0ae1adba84d9f78e91705
https://github.com/agarwl/auto-drac/tree/d86c480b51929e6e4ec0ae1adba84d9f78e91705
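A short usage sketch for the AugCNN entry above, assuming the class is in scope. The augmentation is a single 3x3 Conv2d_tf with TF-style 'SAME' padding, so spatial dimensions are preserved and the channel count stays at 3.

import torch

aug = AugCNN()
obs = torch.rand([4, 3, 64, 64])    # shape from get_inputs()
out = aug(obs)
print(out.shape)                    # torch.Size([4, 3, 64, 64])

# apply_init_ gives the conv a Xavier-uniform weight and a zero bias, and the
# module is left in train() mode so it can be optimized jointly with the policy.
print(aug.aug.weight.shape, aug.aug.bias.abs().sum().item())   # torch.Size([3, 3, 3, 3]) 0.0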
GRULRCell
import torch import torch.nn as nn import torch.onnx def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) 
self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class GRULRCell(RNNCell): """ GRU LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 4 matrices if not None else creates 3 matrices) uRank = rank of U matrix (creates 4 matrices if not None else creates 3 matrices) GRU architecture and compression techniques are found in GRU(LINK) paper Basic architecture is like: r_t = gate_nl(W1x_t + U1h_{t-1} + B_r) z_t = gate_nl(W2x_t + U2h_{t-1} + B_g) h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='GRULR'): super(GRULRCell, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 3, 3, 3, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W3 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_r = nn.Parameter(torch.ones([1, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'GRULR' def forward(self, input, state): if self._wRank is None: wComp1 = torch.matmul(input, self.W1) wComp2 = torch.matmul(input, self.W2) wComp3 = torch.matmul(input, self.W3) else: wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1) wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2) wComp3 = torch.matmul(torch.matmul(input, self.W), self.W3) if self._uRank is None: uComp1 = torch.matmul(state, self.U1) uComp2 = torch.matmul(state, self.U2) else: uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1) uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2) pre_comp1 = 
wComp1 + uComp1 pre_comp2 = wComp2 + uComp2 r = gen_nonlinearity(pre_comp1 + self.bias_r, self._gate_nonlinearity) z = gen_nonlinearity(pre_comp2 + self.bias_gate, self. _gate_nonlinearity) if self._uRank is None: pre_comp3 = wComp3 + torch.matmul(r * state, self.U3) else: pre_comp3 = wComp3 + torch.matmul(torch.matmul(r * state, self. U), self.U3) c = gen_nonlinearity(pre_comp3 + self.bias_update, self. _update_nonlinearity) new_h = z * state + (1.0 - z) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 3: Vars.extend([self.W1, self.W2, self.W3]) else: Vars.extend([self.W, self.W1, self.W2, self.W3]) if self._num_U_matrices == 3: Vars.extend([self.U1, self.U2, self.U3]) else: Vars.extend([self.U, self.U1, self.U2, self.U3]) Vars.extend([self.bias_r, self.bias_gate, self.bias_update]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp5 tmp10 = tmp5 * tmp9 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr1 + x2, xmask) tmp7 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp5 tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(in_out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr0 + x2, tmp17, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), 
(4, 1), 0), primals_5, out=buf3) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_7, out=buf4) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0[grid(256)](buf0, buf3, primals_8, primals_6, buf6, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf7 = buf3 del buf3 extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), primals_10, out=buf7) buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(256)](buf5, buf8, buf4, primals_9, buf7, primals_11, primals_6, buf9, 256, XBLOCK =128, num_warps=4, num_stages=1) del buf4 del buf7 del primals_11 del primals_9 return buf9, primals_6, buf5, buf8, reinterpret_tensor(buf6, (4, 64), ( 1, 4), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0 ), buf10, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. 
_num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class GRULRCellNew(RNNCell): """ GRU LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 4 matrices if not None else creates 3 matrices) uRank = rank of U matrix (creates 4 matrices if not None else creates 3 matrices) GRU architecture and compression techniques are found in GRU(LINK) paper Basic architecture is like: r_t = gate_nl(W1x_t + U1h_{t-1} + B_r) z_t = gate_nl(W2x_t + U2h_{t-1} + B_g) h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='GRULR'): super(GRULRCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 3, 3, 3, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if 
uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W3 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_r = nn.Parameter(torch.ones([1, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'GRULR' def getVars(self): Vars = [] if self._num_W_matrices == 3: Vars.extend([self.W1, self.W2, self.W3]) else: Vars.extend([self.W, self.W1, self.W2, self.W3]) if self._num_U_matrices == 3: Vars.extend([self.U1, self.U2, self.U3]) else: Vars.extend([self.U, self.U1, self.U2, self.U3]) Vars.extend([self.bias_r, self.bias_gate, self.bias_update]) return Vars def forward(self, input_0, input_1): primals_1 = self.W1 primals_3 = self.W2 primals_4 = self.W3 primals_5 = self.U1 primals_7 = self.U2 primals_10 = self.U3 primals_8 = self.bias_r primals_9 = self.bias_gate primals_11 = self.bias_update primals_2 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
adityakusupati/EdgeML
GRULRCell
false
3035
[ "MIT" ]
0
65933a6fdfc38945f4311043a62e120784b2b0bf
https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf
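Note on the GRULR record above: the fused Triton kernels (together with the extern matmuls in call()) implement the gate/update equations quoted in the cell's docstring. The sketch below is a minimal eager-mode restatement of that update for the full-rank path with sigmoid gates and a tanh update, written only for orientation and not taken from the repository; parameter names follow the docstring.

import torch

def grulr_step(x, h, W1, W2, W3, U1, U2, U3, b_r, b_g, b_h):
    # r_t and z_t use the gate nonlinearity, h_hat the update nonlinearity
    r = torch.sigmoid(x @ W1 + h @ U1 + b_r)
    z = torch.sigmoid(x @ W2 + h @ U2 + b_g)
    h_hat = torch.tanh(x @ W3 + r * (h @ U3) + b_h)
    # h_t = z_t * h_{t-1} + (1 - z_t) * h_t^
    return z * h + (1.0 - z) * h_hat

# usage sketch with the 4-unit sizes asserted in call() above
x, h = torch.randn(8, 4), torch.zeros(8, 4)
Ws = [0.1 * torch.randn(4, 4) for _ in range(6)]
bs = [torch.ones(1, 4) for _ in range(3)]
h_next = grulr_step(x, h, *Ws, *bs)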
RGBDiff
import torch from torch import nn from torchvision import models as models import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.onnx class RGBDiff(nn.Module): def __init__(self, dim=1): super().__init__() self.dim = dim def forward(self, image): """ Args: image (torch.Tensor): (N x T x C x H x W) """ diffs = [] for i in range(1, image.size(self.dim)): prev = image.index_select(self.dim, image.new_tensor(i - 1, dtype=torch.long)) current = image.index_select(self.dim, image.new_tensor(i, dtype=torch.long)) diffs.append(current - prev) return torch.cat(diffs, dim=self.dim) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torchvision import models as models import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 3 x0 = xindex % 16 x2 = xindex // 48 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 2, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tl.full([1], 3, tl.int64) tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp22 - tmp23 tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp19, tmp24, tmp25) tmp27 = tl.where(tmp13, tmp18, tmp26) tmp28 = tl.where(tmp4, tmp9, tmp27) tl.store(out_ptr0 + x3, tmp28, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RGBDiffNew(nn.Module): def __init__(self, dim=1): super().__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JinYAnGHe/openvino_training_extensions
RGBDiff
false
3036
[ "Apache-2.0" ]
0
a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
https://github.com/JinYAnGHe/openvino_training_extensions/tree/a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
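Note on the RGBDiff record above: triton_poi_fused_cat_0 fuses the Python loop of index_select subtractions and the final cat into one kernel. The same result can be expressed as a single slicing subtraction; the sketch below (assuming dim=1 and a contiguous float input) is only a reference formulation, not code from the repository.

import torch

def rgb_diff_reference(image, dim=1):
    # difference of consecutive frames along `dim`; the output has one
    # fewer entry along that dimension, e.g. (4, 4, 4, 4) -> (4, 3, 4, 4)
    length = image.size(dim) - 1
    return image.narrow(dim, 1, length) - image.narrow(dim, 0, length)

x = torch.rand(4, 4, 4, 4)
out = rgb_diff_reference(x)   # same shape as buf0 produced by call() above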
BiaffineScorer
import torch import torch.nn as nn class BiaffineScorer(nn.Module): def __init__(self, input1_size, input2_size, output_size): super().__init__() self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1, output_size) self.W_bilin.weight.data.zero_() self.W_bilin.bias.data.zero_() def forward(self, input1, input2): input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1) ], len(input1.size()) - 1) input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1) ], len(input2.size()) - 1) return self.W_bilin(input1, input2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input1_size': 4, 'input2_size': 4, 'output_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = 1.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 5, 5), (25, 5, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(320)](primals_1, buf0, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_cat_0[grid(320)](primals_2, buf1, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, ( 64, 5), (5, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 5), (5, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf3 = buf2 del buf2 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_add_1[grid(256)](buf4, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 return buf4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0 ), reinterpret_tensor(buf1, (64, 5), (5, 1), 0) class BiaffineScorerNew(nn.Module): def __init__(self, input1_size, input2_size, output_size): super().__init__() self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1, output_size) self.W_bilin.weight.data.zero_() self.W_bilin.bias.data.zero_() def forward(self, input_0, input_1): primals_3 = self.W_bilin.weight primals_4 = self.W_bilin.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
WEYAI/PhoNLP
BiaffineScorer
false
3037
[ "Apache-2.0" ]
0
8fefe49965dc6346c224a5636d9333a7ddf55a2c
https://github.com/WEYAI/PhoNLP/tree/8fefe49965dc6346c224a5636d9333a7ddf55a2c
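Note on the BiaffineScorer record above: the two triton_poi_fused_cat_0 launches append a constant 1 feature to each input, so the subsequent _trilinear plus bias evaluates a biaffine score that also carries the linear and bias terms. The einsum form below is an illustrative sketch of that score, not the repository's code.

import torch

def biaffine_score(x1, x2, weight, bias):
    # weight has shape (out, in1 + 1, in2 + 1); bias has shape (out,)
    x1 = torch.cat([x1, x1.new_ones(*x1.shape[:-1], 1)], dim=-1)
    x2 = torch.cat([x2, x2.new_ones(*x2.shape[:-1], 1)], dim=-1)
    # score_o = x1^T W_o x2 + b_o for each output channel o
    return torch.einsum('...i,oij,...j->...o', x1, weight, x2) + bias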
DHead
import torch import torch.nn as nn from math import * class DHead(nn.Module): def __init__(self): super().__init__() self.conv = nn.Conv2d(256, 1, 4) def forward(self, x): output = torch.sigmoid(self.conv(x)) return output def get_inputs(): return [torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from math import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 14884 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 61, 61), (3721, 3721, 61, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_sigmoid_0[grid(14884)](buf1, primals_2, 14884, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf1 class DHeadNew(nn.Module): def __init__(self): super().__init__() self.conv = nn.Conv2d(256, 1, 4) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
a8252525/NIID-Bench
DHead
false
3038
[ "MIT" ]
0
33df8d3a7b941884eec3c7bd52adb8a9476eb282
https://github.com/a8252525/NIID-Bench/tree/33df8d3a7b941884eec3c7bd52adb8a9476eb282
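Note on records such as DHead above, where a one-kernel fusion replaces a short eager forward: a quick way to spot-check any original/generated pair in this dump is to load the same weights into both modules and compare outputs on a CUDA tensor. The harness below is only a suggested sketch; the class names DHead and DHeadNew are taken from the record above, and nothing here comes from the repository itself.

import torch

def check_equivalence(original, generated, inputs, atol=1e-5):
    # share the weights, then compare the eager output against the Triton path
    generated.load_state_dict(original.state_dict())
    with torch.no_grad():
        return torch.allclose(original(*inputs), generated(*inputs), atol=atol)

# usage sketch: both modules and the input must live on the GPU,
# because call() launches CUDA Triton kernels directly
# check_equivalence(DHead().cuda(), DHeadNew().cuda(),
#                   [torch.rand(4, 256, 64, 64, device='cuda')])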
FeatureExtractionBlock
import math import torch import torch.utils.data import torch import torch.nn as nn class Conv2dSame(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1): super(Conv2dSame, self).__init__() self.F = kernel_size self.S = stride self.D = dilation self.layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, dilation=dilation) def forward(self, x_in): _N, _C, H, W = x_in.shape H2 = math.ceil(H / self.S) W2 = math.ceil(W / self.S) Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))( x_in) x_relu = nn.ReLU()(x_pad) x_out = self.layer(x_relu) return x_out class FeatureExtractionBlock(nn.Module): """Initialize the Resnet block A resnet block is a conv block with skip connections We construct a conv block with build_conv_block function, and implement skip connections in <forward> function. Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf """ def __init__(self, input_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): """Construct a convolutional block. Parameters: dim (int) -- the number of channels in the conv layer. norm_layer -- normalization layer use_dropout (bool) -- if use dropout layers. use_bias (bool) -- if the conv layer uses bias or not Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) """ super(FeatureExtractionBlock, self).__init__() self.C3R1 = Conv2dSame(input_nc, ngf, kernel_size=3) self.C1R1 = Conv2dSame(ngf, ngf, kernel_size=1) self.C3R2 = Conv2dSame(input_nc, ngf, kernel_size=3) self.C1R2 = Conv2dSame(ngf, ngf, kernel_size=1) def forward(self, input): return self.C3R1(input) + self.C3R2(input) + self.C1R1(self.C3R1(input) ) + self.C1R2(self.C3R2(input)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_nc': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, None) tmp4 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x3, None) tmp8 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr5 + x3, None) tmp12 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tl.store(in_out_ptr0 + x3, tmp14, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_9, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_relu_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 
0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1)) buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1)) buf3 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. float32) triton_poi_fused_convolution_relu_1[grid(4096)](buf1, primals_3, buf3, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1)) buf5 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. float32) triton_poi_fused_convolution_relu_1[grid(4096)](buf2, primals_5, buf5, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 4, 4), (1024, 16, 4, 1)) buf7 = buf1 del buf1 triton_poi_fused_add_convolution_2[grid(4096)](buf7, primals_3, buf2, primals_5, buf4, primals_7, buf6, primals_9, 4096, XBLOCK =256, num_warps=4, num_stages=1) del buf2 del buf4 del buf6 del primals_3 del primals_5 del primals_7 del primals_9 return buf7, primals_2, primals_4, primals_6, primals_8, buf0, buf3, buf5 class Conv2dSame(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1): super(Conv2dSame, self).__init__() self.F = kernel_size self.S = stride self.D = dilation self.layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, dilation=dilation) def forward(self, x_in): _N, _C, H, W = x_in.shape H2 = math.ceil(H / self.S) W2 = math.ceil(W / self.S) Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))( x_in) x_relu = nn.ReLU()(x_pad) x_out = self.layer(x_relu) return x_out class FeatureExtractionBlockNew(nn.Module): """Initialize the Resnet block A resnet block is a conv block with skip connections We construct a conv block with build_conv_block function, and implement skip connections in <forward> function. Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf """ def __init__(self, input_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): """Construct a convolutional block. Parameters: dim (int) -- the number of channels in the conv layer. norm_layer -- normalization layer use_dropout (bool) -- if use dropout layers. 
use_bias (bool) -- if the conv layer uses bias or not Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) """ super(FeatureExtractionBlockNew, self).__init__() self.C3R1 = Conv2dSame(input_nc, ngf, kernel_size=3) self.C1R1 = Conv2dSame(ngf, ngf, kernel_size=1) self.C3R2 = Conv2dSame(input_nc, ngf, kernel_size=3) self.C1R2 = Conv2dSame(ngf, ngf, kernel_size=1) def forward(self, input_0): primals_2 = self.C3R1.layer.weight primals_3 = self.C3R1.layer.bias primals_6 = self.C1R1.layer.weight primals_5 = self.C1R1.layer.bias primals_4 = self.C3R2.layer.weight primals_7 = self.C3R2.layer.bias primals_8 = self.C1R2.layer.weight primals_9 = self.C1R2.layer.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
adityamehta00/HIDeGAN
FeatureExtractionBlock
false
3039
[ "BSD-3-Clause" ]
0
91a0674e092ccde2784a82bf927dfefd8673eb4c
https://github.com/adityamehta00/HIDeGAN/tree/91a0674e092ccde2784a82bf927dfefd8673eb4c
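Note on the FeatureExtractionBlock record above: Conv2dSame pads so that the output keeps ceil(size / stride) positions, using Pr = (H2 - 1) * S + (F - 1) * D + 1 - H with H2 = ceil(H / S). The small worked sketch below (illustration only) shows that arithmetic and why triton_poi_fused_constant_pad_nd_relu_0 operates on a 6x6 tensor for the 4x4 test input.

import math

def same_pad(size, kernel, stride=1, dilation=1):
    # total padding needed to keep ceil(size / stride) output positions,
    # split into a (left, right) pair the way nn.ZeroPad2d expects
    out = math.ceil(size / stride)
    total = (out - 1) * stride + (kernel - 1) * dilation + 1 - size
    return total // 2, total - total // 2

print(same_pad(4, 3))   # (1, 1): a 4x4 map becomes 6x6 before the 3x3 conv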
FastGRNNCell
import torch import torch.nn as nn import torch.onnx def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) 
self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastGRNNCell(RNNCell): """ FastGRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. zetaInit = init for zeta, the scale param nuInit = init for nu, the translation param FastGRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(Wx_t + Uh_{t-1} + B_g) h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'): super(FastGRNNCell, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank, wSparsity, uSparsity) self._zetaInit = zetaInit self._nuInit = nuInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1])) self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1])) self.copy_previous_UW() @property def name(self): return self._name @property def cellType(self): return 'FastGRNN' def forward(self, input, state): if self._wRank is None: wComp = torch.matmul(input, self.W) else: wComp = torch.matmul(torch.matmul(input, self.W1), self.W2) if self._uRank is None: uComp = torch.matmul(state, self.U) else: uComp = torch.matmul(torch.matmul(state, self.U1), self.U2) pre_comp = wComp + uComp z = gen_nonlinearity(pre_comp + self.bias_gate, self._gate_nonlinearity ) c = gen_nonlinearity(pre_comp + self.bias_update, self. _update_nonlinearity) new_h = z * state + (torch.sigmoid(self.zeta) * (1.0 - z) + torch. 
sigmoid(self.nu)) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) Vars.extend([self.zeta, self.nu]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x1 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x0, xmask) tmp8 = tl.load(in_ptr3 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp14 = tl.load(in_ptr4 + 0) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp10 = tl.sigmoid(tmp9) tmp11 = 1.0 tmp12 = tmp11 - tmp5 tmp13 = tmp10 * tmp12 tmp16 = tl.sigmoid(tmp15) tmp17 = tmp13 + tmp16 tmp19 = tmp2 + tmp18 tmp20 = libdevice.tanh(tmp19) tmp21 = tmp17 * tmp20 tmp22 = tmp7 + tmp21 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp22, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, 1), (1, 1)) assert_size_stride(primals_8, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf2, buf1, primals_5, primals_4, primals_7, primals_8, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return (buf3, primals_4, primals_5, primals_6, primals_7, primals_8, buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. 
ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastGRNNCellNew(RNNCell): """ FastGRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn 
update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. zetaInit = init for zeta, the scale param nuInit = init for nu, the translation param FastGRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(Wx_t + Uh_{t-1} + B_g) h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'): super(FastGRNNCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank, wSparsity, uSparsity) self._zetaInit = zetaInit self._nuInit = nuInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1])) self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1])) self.copy_previous_UW() @property def name(self): return self._name @property def cellType(self): return 'FastGRNN' def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) Vars.extend([self.zeta, self.nu]) return Vars def forward(self, input_0, input_1): primals_1 = self.W primals_3 = self.U primals_5 = self.bias_gate primals_6 = self.bias_update primals_7 = self.zeta primals_8 = self.nu primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
adityakusupati/EdgeML
FastGRNNCell
false
3040
[ "MIT" ]
0
65933a6fdfc38945f4311043a62e120784b2b0bf
https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf
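Note on the FastGRNNCell record above: the single fused kernel evaluates the whole FastGRNN state update from the docstring, reusing one pre-activation for both the gate and the candidate. The eager sketch below shows the full-rank path for orientation only; it is not the repository's code.

import torch

def fastgrnn_step(x, h, W, U, b_g, b_h, zeta, nu):
    pre = x @ W + h @ U                    # shared pre-activation (buf2 in call())
    z = torch.sigmoid(pre + b_g)           # gate
    h_hat = torch.tanh(pre + b_h)          # candidate state
    # h_t = z*h_{t-1} + (sigmoid(zeta)*(1 - z) + sigmoid(nu)) * h_t^
    return z * h + (torch.sigmoid(zeta) * (1.0 - z) + torch.sigmoid(nu)) * h_hat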
Actor
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc_units=256): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(Actor, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc_units) self.fc2 = nn.Linear(fc_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(-0.003, 0.003) def forward(self, state): """Build an actor (policy) network that maps states -> actions.""" x = F.relu(self.fc1(state)) return F.tanh(self.fc2(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 256), (256, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf4, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_1[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), buf3, primals_4, buf4 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class ActorNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc_units=256): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(ActorNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc_units) self.fc2 = nn.Linear(fc_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ahgit2021/deep-reinforcement-learning
Actor
false
3041
[ "MIT" ]
0
081464ba45f803663e841e1635d829aa00cce870
https://github.com/ahgit2021/deep-reinforcement-learning/tree/081464ba45f803663e841e1635d829aa00cce870
FitNet
import torch import torch.nn as nn class FitNet(nn.Module): def __init__(self, in_feature, out_feature): super().__init__() self.in_feature = in_feature self.out_feature = out_feature self.transform = nn.Conv2d(in_feature, out_feature, 1, bias=False) self.transform.weight.data.uniform_(-0.005, 0.005) def forward(self, student, teacher): if student.dim() == 2: student = student.unsqueeze(2).unsqueeze(3) teacher = teacher.unsqueeze(2).unsqueeze(3) return (self.transform(student) - teacher).pow(2).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_feature': 4, 'out_feature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 2.0 tmp8 = tmp2 * tmp7 tmp9 = 256.0 tmp10 = tmp6 / tmp9 tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp8, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_mean_mul_pow_sub_0[grid(1)](buf3, buf0, primals_3, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del primals_3 return buf3, primals_1, primals_2, buf2 class FitNetNew(nn.Module): def __init__(self, in_feature, out_feature): super().__init__() self.in_feature = in_feature self.out_feature = out_feature self.transform = nn.Conv2d(in_feature, out_feature, 1, bias=False) self.transform.weight.data.uniform_(-0.005, 0.005) def forward(self, input_0, input_1): primals_2 = self.transform.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
ahu-hpt/AOMD
FitNet
false
3043
[ "Apache-2.0" ]
0
8d99dbb803feaef55fc089bfb3399d2fb21d55d8
https://github.com/ahu-hpt/AOMD/tree/8d99dbb803feaef55fc089bfb3399d2fb21d55d8
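Note on the FitNet record above: the forward value is a mean-squared hint loss between the 1x1-conv-transformed student features and the teacher features; the fused kernel also stores 2*(diff), which appears to be kept for the backward pass. The sketch below restates the loss in eager mode for illustration only.

import torch
import torch.nn as nn

def fitnet_loss(student, teacher, transform):
    # map student features into the teacher's channel space, then MSE
    if student.dim() == 2:
        student = student[:, :, None, None]
        teacher = teacher[:, :, None, None]
    return (transform(student) - teacher).pow(2).mean()

transform = nn.Conv2d(4, 4, 1, bias=False)
loss = fitnet_loss(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4), transform)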
AE
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn class AE(nn.Module): def __init__(self, num_channels): super(AE, self).__init__() self.enc1 = nn.Conv2d(num_channels, 64, kernel_size=3, padding=1) self.enc2 = nn.Conv2d(64, 32, kernel_size=3, padding=1) self.enc3 = nn.Conv2d(32, 16, kernel_size=3, padding=1) self.enc4 = nn.Conv2d(16, 8, kernel_size=3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.dec1 = nn.ConvTranspose2d(8, 8, kernel_size=2, stride=2) self.dec2 = nn.ConvTranspose2d(8, 16, kernel_size=2, stride=2) self.dec3 = nn.ConvTranspose2d(16, 32, kernel_size=2, stride=2) self.dec4 = nn.ConvTranspose2d(32, 64, kernel_size=2, stride=2) self.out = nn.Conv2d(64, num_channels, kernel_size=3, padding=1) def forward(self, x): x = F.relu(self.enc1(x)) x = self.pool(x) x = F.relu(self.enc2(x)) x = self.pool(x) x = F.relu(self.enc3(x)) x = self.pool(x) x = F.relu(self.enc4(x)) enc = self.pool(x) res = F.relu(self.dec1(enc)) res = F.relu(self.dec2(res)) res = F.relu(self.dec3(res)) res = F.relu(self.dec4(res)) res = self.out(res) return enc, res def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'num_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = 
tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 8 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], 
True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (8, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_9, (8,), (1,)) assert_size_stride(primals_10, (8, 8, 2, 2), (32, 4, 2, 1)) assert_size_stride(primals_11, (8,), (1,)) assert_size_stride(primals_12, (8, 16, 2, 2), (64, 4, 2, 1)) assert_size_stride(primals_13, (16,), (1,)) assert_size_stride(primals_14, (16, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_15, (32,), (1,)) assert_size_stride(primals_16, (32, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (4, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf1, buf2, buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch. 
float32) buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf9, buf10, buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 8, 8, 8), (512, 64, 8, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_6[grid(2048)](buf13, primals_9, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) buf15 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_7[grid(512)](buf13, buf14, buf15, 512, XBLOCK=128, num_warps=4, num_stages=1) buf16 = extern_kernels.convolution(buf14, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 8, 8, 8), (512, 64, 8, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_6[grid(2048)](buf17, primals_11, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf18 = extern_kernels.convolution(buf17, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 16, 16, 16), (4096, 256, 16, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_4[grid(16384)](buf19, primals_13, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf20 = extern_kernels.convolution(buf19, primals_14, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_2[grid(131072)](buf21, primals_15, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf22 = extern_kernels.convolution(buf21, primals_16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_0[grid(1048576)](buf23, primals_17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf24 = extern_kernels.convolution(buf23, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_8[grid(65536)](buf25, primals_19, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_19 return (buf14, buf25, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf19, buf21, buf23) class AENew(nn.Module): def __init__(self, num_channels): super(AENew, self).__init__() self.enc1 = nn.Conv2d(num_channels, 64, kernel_size=3, padding=1) self.enc2 = nn.Conv2d(64, 32, kernel_size=3, padding=1) self.enc3 = nn.Conv2d(32, 16, kernel_size=3, padding=1) self.enc4 = nn.Conv2d(16, 8, kernel_size=3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.dec1 = nn.ConvTranspose2d(8, 8, kernel_size=2, stride=2) self.dec2 = nn.ConvTranspose2d(8, 16, kernel_size=2, stride=2) self.dec3 = nn.ConvTranspose2d(16, 32, kernel_size=2, stride=2) self.dec4 = nn.ConvTranspose2d(32, 64, 
kernel_size=2, stride=2) self.out = nn.Conv2d(64, num_channels, kernel_size=3, padding=1) def forward(self, input_0): primals_1 = self.enc1.weight primals_2 = self.enc1.bias primals_4 = self.enc2.weight primals_5 = self.enc2.bias primals_6 = self.enc3.weight primals_7 = self.enc3.bias primals_8 = self.enc4.weight primals_9 = self.enc4.bias primals_10 = self.dec1.weight primals_11 = self.dec1.bias primals_12 = self.dec2.weight primals_13 = self.dec2.bias primals_14 = self.dec3.weight primals_15 = self.dec3.bias primals_16 = self.dec4.weight primals_17 = self.dec4.bias primals_18 = self.out.weight primals_19 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return output[0], output[1]
ahanagemini/Phd_final_year_old_sr
AE
false
3,045
[ "BSD-2-Clause" ]
0
62be9d1294acdb724a2fe424789b657a44e2cd7d
https://github.com/ahanagemini/Phd_final_year_old_sr/tree/62be9d1294acdb724a2fe424789b657a44e2cd7d
Critic
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn


def hidden_init(layer):
    fan_in = layer.weight.data.size()[0]
    lim = 1.0 / np.sqrt(fan_in)
    return -lim, lim


class Critic(nn.Module):
    """Critic (Value) Model."""

    def __init__(self, state_size, action_size, seed, fcs1_units=256,
        fc2_units=256, fc3_units=128):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fcs1_units (int): Number of nodes in the first hidden layer
            fc2_units (int): Number of nodes in the second hidden layer
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fcs1 = nn.Linear(state_size, fcs1_units)
        self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
        self.fc3 = nn.Linear(fc2_units, fc3_units)
        self.fc4 = nn.Linear(fc3_units, 2)
        self.reset_parameters()

    def reset_parameters(self):
        self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
        self.fc4.weight.data.uniform_(-0.003, 0.003)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        xs = F.leaky_relu(self.fcs1(state))
        x = torch.cat((xs, action), dim=1)
        x = F.leaky_relu(self.fc2(x))
        x = F.leaky_relu(self.fc3(x))
        return self.fc4(x)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
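Below is a minimal usage sketch, added here and not taken from the source repository, using the toy sizes from get_init_inputs(). Note that in this particular variant the final layer fc4 produces two outputs per (state, action) pair rather than the single Q-value the docstring suggests.

import torch

# Sketch only: assumes the Critic class defined above is in scope.
critic = Critic(state_size=4, action_size=4, seed=4)
state = torch.rand(4, 4)     # batch of 4 states
action = torch.rand(4, 4)    # batch of 4 actions
q = critic(state, action)
# fcs1 -> leaky_relu -> concat action -> fc2 -> fc3 -> fc4 (2 outputs here).
assert q.shape == (4, 2)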
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 260 x1 = xindex // 260 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (256 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0).to(tl.int1) tmp6 = tl.load(in_ptr1 + (256 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp7 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.01 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 260, tl.int64) tmp17 = tl.load(in_ptr3 + (4 * x1 + (-256 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (256, 260), (260, 1)) assert_size_stride(primals_6, (256,), (1,)) assert_size_stride(primals_7, (128, 256), 
(256, 1)) assert_size_stride(primals_8, (128,), (1,)) assert_size_stride(primals_9, (2, 128), (128, 1)) assert_size_stride(primals_10, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 256), (256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(1024)](buf0, primals_2, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 260), (260, 1), torch.float32) triton_poi_fused_cat_1[grid(1040)](buf1, buf0, primals_2, primals_4, buf2, 1040, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (260, 256), ( 1, 260), 0), out=buf3) buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool) buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32) triton_poi_fused_leaky_relu_2[grid(1024)](buf3, primals_6, buf4, buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_6 buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 128), ( 1, 256), 0), out=buf6) buf7 = empty_strided_cuda((4, 128), (128, 1), torch.bool) buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32) triton_poi_fused_leaky_relu_3[grid(512)](buf6, primals_8, buf7, buf8, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del primals_8 buf9 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_10, buf8, reinterpret_tensor(primals_9, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf9) del primals_10 return (buf9, primals_3, buf1, buf2, buf4, buf5, buf7, buf8, primals_9, primals_7, primals_5) def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=256, fc2_units=256, fc3_units=128): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, fc3_units) self.fc4 = nn.Linear(fc3_units, 2) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(*hidden_init(self.fc3)) self.fc4.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_9 = self.fc4.weight primals_10 = self.fc4.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
ahgit2021/deep-reinforcement-learning
Critic
false
3,046
[ "MIT" ]
0
081464ba45f803663e841e1635d829aa00cce870
https://github.com/ahgit2021/deep-reinforcement-learning/tree/081464ba45f803663e841e1635d829aa00cce870
Network
import torch
import torch.nn as nn


class Network(nn.Module):

    def __init__(self, number_of_inputs, number_of_outputs):
        super(Network, self).__init__()
        self.l1 = nn.Linear(number_of_inputs, number_of_inputs)
        self.l2 = nn.Linear(number_of_inputs, number_of_inputs)
        self.l3 = nn.Linear(number_of_inputs, number_of_outputs)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out1 = self.l1(x)
        out2 = self.l2(out1)
        y_pred = self.sigmoid(self.l3(out2))
        return y_pred


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'number_of_inputs': 4, 'number_of_outputs': 4}]
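A short illustrative sketch, added here rather than taken from the original code: the model is two un-activated linear layers followed by a third linear layer and a sigmoid, so every output lies strictly between 0 and 1, and nn.Linear acts on the last dimension of the 4x4x4x4 input.

import torch

# Sketch only: assumes the Network class defined above is in scope.
net = Network(number_of_inputs=4, number_of_outputs=4)
x = torch.rand(4, 4, 4, 4)          # same shape as get_inputs()
y = net(x)
assert y.shape == (4, 4, 4, 4)      # linear layers map the last dimension
assert y.min() > 0 and y.max() < 1  # sigmoid keeps outputs in (0, 1)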
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_6, (4, 4), (1, 4 ), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(256)](buf3, primals_7, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf1, buf3, primals_6, primals_4 class NetworkNew(nn.Module): def __init__(self, number_of_inputs, number_of_outputs): super(NetworkNew, self).__init__() self.l1 = nn.Linear(number_of_inputs, number_of_inputs) self.l2 = nn.Linear(number_of_inputs, number_of_inputs) self.l3 = nn.Linear(number_of_inputs, number_of_outputs) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
ajthinking/migration-analysis
Network
false
3,047
[ "MIT" ]
0
c48312c5bf513a37e23eaaf8aa245921992f8e7f
https://github.com/ajthinking/migration-analysis/tree/c48312c5bf513a37e23eaaf8aa245921992f8e7f
AttentionTransfer
import torch
import torch.nn as nn
import torch.nn.functional as F


class AttentionTransfer(nn.Module):

    def forward(self, student, teacher):
        s_attention = F.normalize(student.pow(2).mean(1).view(student.size(0), -1))
        with torch.no_grad():
            t_attention = F.normalize(teacher.pow(2).mean(1).view(teacher.size(0), -1))
        return (s_attention - t_attention).pow(2).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
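The sketch below, added for illustration and not part of the original file, spells out what the forward pass computes: each sample's attention map is the channel-wise mean of its squared activations, flattened and L2-normalized, and the loss is the mean squared difference between the student and teacher maps. The helper name attention_map is introduced here only for this example.

import torch
import torch.nn.functional as F

# Sketch only: assumes the AttentionTransfer class defined above is in scope.
student = torch.rand(4, 4, 4, 4)
teacher = torch.rand(4, 4, 4, 4)

def attention_map(x):
    # Spatial attention per sample: channel-wise mean of squared activations,
    # flattened and L2-normalized along the feature dimension (dim=1 default).
    return F.normalize(x.pow(2).mean(dim=1).view(x.size(0), -1))

loss_manual = (attention_map(student) - attention_map(teacher)).pow(2).mean()
loss_module = AttentionTransfer()(student, teacher)
assert torch.allclose(loss_manual, loss_module)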
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_linalg_vector_norm_sub_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp8 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp23 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp26 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp19 = tmp18 * tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp28 / tmp11 tmp30 = tmp29 * tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.where(xmask, tmp31, 0) tmp34 = tl.sum(tmp33, 1)[:, None] tmp35 = libdevice.sqrt(tmp17) tmp36 = 1e-12 tmp37 = triton_helpers.maximum(tmp35, tmp36) tmp38 = tmp12 / tmp37 tmp39 = libdevice.sqrt(tmp34) tmp40 = triton_helpers.maximum(tmp39, tmp36) tmp41 = tmp29 / tmp40 tmp42 = tmp38 - tmp41 tl.store(out_ptr2 + (r1 + 16 * x0), tmp42, xmask) @triton.jit def triton_per_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_linalg_vector_norm_sub_0[grid(4)](arg0_1, arg1_1, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_mean_pow_1[grid(1)](buf4, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf4, class AttentionTransferNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = 
input_1 output = call([arg0_1, arg1_1]) return output[0]
ahu-hpt/AOMD
AttentionTransfer
false
3,048
[ "Apache-2.0" ]
0
8d99dbb803feaef55fc089bfb3399d2fb21d55d8
https://github.com/ahu-hpt/AOMD/tree/8d99dbb803feaef55fc089bfb3399d2fb21d55d8
HardDarkRank
import torch
import torch.nn as nn


def pdist(e, squared=False, eps=1e-12):
    e_square = e.pow(2).sum(dim=1)
    prod = e @ e.t()
    res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) - 2 * prod).clamp(min=eps)
    if not squared:
        res = res.sqrt()
    res = res.clone()
    res[range(len(e)), range(len(e))] = 0
    return res


class HardDarkRank(nn.Module):

    def __init__(self, alpha=3, beta=3, permute_len=4):
        super().__init__()
        self.alpha = alpha
        self.beta = beta
        self.permute_len = permute_len

    def forward(self, student, teacher):
        score_teacher = -1 * self.alpha * pdist(teacher, squared=False).pow(self.beta)
        score_student = -1 * self.alpha * pdist(student, squared=False).pow(self.beta)
        permute_idx = score_teacher.sort(dim=1, descending=True)[1][:, 1:self.permute_len + 1]
        ordered_student = torch.gather(score_student, 1, permute_idx)
        log_prob = (ordered_student - torch.stack([torch.logsumexp(
            ordered_student[:, i:], dim=1) for i in range(permute_idx.size(1))], dim=1)).sum(dim=1)
        loss = (-1 * log_prob).mean()
        return loss


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
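As an aside added here (not in the source), pdist builds the pairwise Euclidean distance matrix from the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, clamps the squared distances at eps before taking the square root, and zeroes the diagonal. The sketch checks it against torch.cdist on a small input.

import torch

# Sketch only: assumes the pdist function defined above is in scope.
e = torch.rand(4, 4)
d_manual = pdist(e, squared=False)      # expansion-based pairwise distances
d_reference = torch.cdist(e, e)         # same quantity, computed directly
assert torch.allclose(d_manual, d_reference, atol=1e-5)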
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_clamp_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = 1e-12 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = libdevice.sqrt(tmp28) tl.store(in_out_ptr0 + x2, tmp29, xmask) @triton.jit def triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 0, tl.int64) tmp6 = tl.where(tmp4, tmp5, tmp3) tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.where(tmp8, tmp1, tmp7) tmp10 = tl.where(tmp2, tmp6, tmp9) tmp11 = 0.0 tl.store(out_ptr0 + tl.broadcast_to(5 * tmp10, [XBLOCK]), tmp11, xmask) @triton.jit def triton_per_fused_mul_pow_sort_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tmp1 * tmp0 tmp3 = -3.0 tmp4 = tmp2 * tmp3 tmp5 = r1 tmp6 = tmp5.to(tl.int16) tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp8 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) _tmp9, tmp10 = triton_helpers.sort_with_index(tmp7, tmp8, None, 1, stable=False, descending=True) tl.store(out_ptr0 + (r1 + 4 * x0), tmp10, xmask) @triton.jit def triton_poi_fused_gather_logsumexp_mul_pow_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask, 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (tmp5 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp8 = tmp7 * tmp7 tmp9 = tmp8 * tmp7 tmp10 = -3.0 tmp11 = tmp9 * tmp10 tmp13 = tmp12.to(tl.int64) tmp14 = tmp13 + tmp2 tmp15 = tmp13 < 0 tmp16 = tl.where(tmp15, tmp14, tmp13) tl.device_assert((0 <= tmp16) & (tmp16 < 4) | ~xmask, 'index out of bounds: 0 <= tmp16 < 4') tmp18 = tl.load(in_ptr1 + (tmp16 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tmp18 * tmp18 tmp20 = tmp19 * tmp18 tmp21 = tmp20 * tmp10 tmp22 = triton_helpers.maximum(tmp11, tmp21) tmp24 = tmp23.to(tl.int64) tmp25 = tmp24 + tmp2 tmp26 = tmp24 < 0 tmp27 = tl.where(tmp26, tmp25, tmp24) tl.device_assert((0 <= tmp27) & (tmp27 < 4) | ~xmask, 'index out of bounds: 0 <= tmp27 < 4') tmp29 = tl.load(in_ptr1 + (tmp27 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tmp29 * tmp29 tmp31 = tmp30 * tmp29 tmp32 = tmp31 * tmp10 tmp33 = triton_helpers.maximum(tmp22, tmp32) tmp34 = tl_math.abs(tmp33) tmp35 = float('inf') tmp36 = tmp34 == tmp35 tmp37 = 0.0 tmp38 = tl.where(tmp36, tmp37, tmp33) tmp39 = tmp11 - tmp38 tmp40 = tl_math.exp(tmp39) tmp41 = tmp21 - tmp38 tmp42 = tl_math.exp(tmp41) tmp43 = tmp40 + tmp42 tmp44 = tmp32 - tmp38 tmp45 = tl_math.exp(tmp44) tmp46 = tmp43 + tmp45 tl.store(out_ptr0 + x0, tmp33, xmask) tl.store(out_ptr1 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_stack_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl_math.log(tmp5) tmp7 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl_math.abs(tmp7) tmp9 = float('inf') tmp10 = tmp8 == tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp10, tmp11, tmp7) tmp13 = tmp6 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tmp17 = tl.full([1], 2, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr2 + (2 + 4 * x1), tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tmp20.to(tl.int64) tmp22 = tl.full([XBLOCK], 4, tl.int32) tmp23 = tmp21 + tmp22 tmp24 = tmp21 < 0 tmp25 = tl.where(tmp24, tmp23, tmp21) tl.device_assert((0 <= tl.broadcast_to(tmp25, [XBLOCK])) & (tl. 
broadcast_to(tmp25, [XBLOCK]) < 4) | ~(tmp19 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp25, [XBLOCK]) < 4') tmp27 = tl.load(in_ptr3 + (tmp25 + 4 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp27 * tmp27 tmp29 = tmp28 * tmp27 tmp30 = -3.0 tmp31 = tmp29 * tmp30 tmp32 = tl.load(in_ptr2 + (3 + 4 * x1), tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 + tmp22 tmp35 = tmp33 < 0 tmp36 = tl.where(tmp35, tmp34, tmp33) tl.device_assert((0 <= tl.broadcast_to(tmp36, [XBLOCK])) & (tl. broadcast_to(tmp36, [XBLOCK]) < 4) | ~(tmp19 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp36, [XBLOCK]) < 4') tmp38 = tl.load(in_ptr3 + (tmp36 + 4 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tmp38 * tmp38 tmp40 = tmp39 * tmp38 tmp41 = tmp40 * tmp30 tmp42 = triton_helpers.maximum(tmp31, tmp41) tmp43 = tl_math.abs(tmp42) tmp44 = tmp43 == tmp9 tmp45 = tl.where(tmp44, tmp11, tmp42) tmp46 = tmp31 - tmp45 tmp47 = tl_math.exp(tmp46) tmp48 = tmp41 - tmp45 tmp49 = tl_math.exp(tmp48) tmp50 = tmp47 + tmp49 tmp51 = tl_math.log(tmp50) tmp52 = tmp51 + tmp45 tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp19, tmp52, tmp53) tmp55 = tmp0 >= tmp17 tl.full([1], 3, tl.int64) tmp58 = tl.load(in_ptr2 + (3 + 4 * x1), tmp55 & xmask, eviction_policy= 'evict_last', other=0.0) tmp59 = tmp58.to(tl.int64) tmp60 = tmp59 + tmp22 tmp61 = tmp59 < 0 tmp62 = tl.where(tmp61, tmp60, tmp59) tl.device_assert((0 <= tl.broadcast_to(tmp62, [XBLOCK])) & (tl. broadcast_to(tmp62, [XBLOCK]) < 4) | ~(tmp55 & xmask), 'index out of bounds: 0 <= tl.broadcast_to(tmp62, [XBLOCK]) < 4') tmp64 = tl.load(in_ptr3 + (tmp62 + 4 * x1), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tmp64 * tmp64 tmp66 = tmp65 * tmp64 tmp67 = tmp66 * tmp30 tmp68 = tl_math.abs(tmp67) tmp69 = tmp68 == tmp9 tmp70 = tl.where(tmp69, tmp11, tmp67) tmp71 = tmp67 - tmp70 tmp72 = tl_math.exp(tmp71) tmp73 = tl_math.log(tmp72) tmp74 = tmp73 + tmp70 tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp55, tmp74, tmp75) tmp77 = tl.where(tmp19, tmp54, tmp76) tmp78 = tl.where(tmp4, tmp15, tmp77) tl.store(out_ptr0 + x2, tmp78, xmask) @triton.jit def triton_per_fused_gather_mean_mul_pow_sub_sum_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + 3 * r0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1 + 3 * r0), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr2 + (2 + 3 * r0), None, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4), 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (tmp5 + 4 * r0), None, eviction_policy= 'evict_last') tmp8 = tmp7 * tmp7 tmp9 = tmp8 * tmp7 tmp10 = -3.0 tmp11 = tmp9 * tmp10 tmp13 = tmp11 - tmp12 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 + tmp2 tmp17 = tmp15 < 0 tmp18 = tl.where(tmp17, tmp16, tmp15) 
tl.device_assert((0 <= tmp18) & (tmp18 < 4), 'index out of bounds: 0 <= tmp18 < 4') tmp20 = tl.load(in_ptr1 + (tmp18 + 4 * r0), None, eviction_policy= 'evict_last') tmp21 = tmp20 * tmp20 tmp22 = tmp21 * tmp20 tmp23 = tmp22 * tmp10 tmp25 = tmp23 - tmp24 tmp26 = tmp13 + tmp25 tmp28 = tmp27.to(tl.int64) tmp29 = tmp28 + tmp2 tmp30 = tmp28 < 0 tmp31 = tl.where(tmp30, tmp29, tmp28) tl.device_assert((0 <= tmp31) & (tmp31 < 4), 'index out of bounds: 0 <= tmp31 < 4') tmp33 = tl.load(in_ptr1 + (tmp31 + 4 * r0), None, eviction_policy= 'evict_last') tmp34 = tmp33 * tmp33 tmp35 = tmp34 * tmp33 tmp36 = tmp35 * tmp10 tmp38 = tmp36 - tmp37 tmp39 = tmp26 + tmp38 tmp40 = -1.0 tmp41 = tmp39 * tmp40 tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = 4.0 tmp46 = tmp44 / tmp45 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp46, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0 del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_clamp_mul_sqrt_sub_0[grid(16)](buf2, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1[grid(4)](buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int16) triton_per_fused_mul_pow_sort_2[grid(4)](buf2, buf5, 4, 4, XBLOCK=1, num_warps=2, num_stages=1) buf6 = buf2 del buf2 extern_kernels.mm(arg1_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf6) buf7 = buf6 del buf6 buf8 = buf7 del buf7 triton_poi_fused_add_clamp_mul_sqrt_sub_0[grid(16)](buf8, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1[grid(4)](buf8, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf11 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_gather_logsumexp_mul_pow_3[grid(4)](buf5, buf8, buf10, buf11, 4, XBLOCK=4, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 3), (3, 1), torch.float32) triton_poi_fused_stack_4[grid(12)](buf11, buf10, buf5, buf8, buf12, 12, XBLOCK=16, num_warps=1, num_stages=1) del buf10 del buf11 buf14 = empty_strided_cuda((), (), torch.float32) buf15 = buf14 del buf14 triton_per_fused_gather_mean_mul_pow_sub_sum_5[grid(1)](buf15, buf5, buf8, buf12, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf12 del buf5 del buf8 return buf15, def pdist(e, squared=False, eps=1e-12): e_square = e.pow(2).sum(dim=1) prod = e @ e.t() res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) - 2 * prod).clamp(min =eps) if not squared: res = res.sqrt() res = res.clone() res[range(len(e)), range(len(e))] = 0 return res class HardDarkRankNew(nn.Module): def __init__(self, alpha=3, beta=3, permute_len=4): super().__init__() self.alpha = alpha self.beta = beta self.permute_len = permute_len def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ahu-hpt/AOMD
HardDarkRank
false
3,049
[ "Apache-2.0" ]
0
8d99dbb803feaef55fc089bfb3399d2fb21d55d8
https://github.com/ahu-hpt/AOMD/tree/8d99dbb803feaef55fc089bfb3399d2fb21d55d8
Attention
import torch
import torch.nn as nn


class Attention(nn.Module):

    def __init__(self, num_channels, embed_size, dropout=True):
        """Stacked attention Module
        """
        super(Attention, self).__init__()
        self.ff_image = nn.Linear(embed_size, num_channels)
        self.ff_questions = nn.Linear(embed_size, num_channels)
        self.dropout = nn.Dropout(p=0.5)
        self.ff_attention = nn.Linear(num_channels, 1)

    def forward(self, vi, vq):
        """Extract feature vector from image vector.
        """
        hi = self.ff_image(vi)
        hq = self.ff_questions(vq).unsqueeze(dim=1)
        ha = torch.tanh(hi + hq)
        if self.dropout:
            ha = self.dropout(ha)
        ha = self.ff_attention(ha)
        pi = torch.softmax(ha, dim=1)
        self.pi = pi
        vi_attended = (pi * vi).sum(dim=1)
        u = vi_attended + vq
        return u


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channels': 4, 'embed_size': 4}]
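A small usage sketch, added here for illustration only: with the 4x4x4x4 toy inputs from get_inputs(), the block broadcasts the question features against the image features, forms softmax weights over dim=1 (kept on the module as self.pi), and adds the attended image feature back onto vq. eval() is called so the dropout layer is inactive.

import torch

# Sketch only: assumes the Attention class defined above is in scope.
att = Attention(num_channels=4, embed_size=4)
att.eval()                    # disable dropout for a deterministic forward
vi = torch.rand(4, 4, 4, 4)   # "image" features
vq = torch.rand(4, 4, 4, 4)   # "question" features
u = att(vi, vq)
assert u.shape == (4, 4, 4, 4)
# The stored attention weights sum to 1 along the softmax dimension.
pi_sum = att.pi.sum(dim=1)
assert torch.allclose(pi_sum, torch.ones_like(pi_sum))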
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x3 = xindex // 256 x5 = xindex % 64 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), xmask, eviction_policy= 
'evict_last') tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + x4, xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x4, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_2, buf1, primals_5, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_5 buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0 ) del buf0 triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_mul_sum_3[grid(256)](buf6, primals_3, primals_6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf7, buf6, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, buf6, primals_7 class AttentionNew(nn.Module): def __init__(self, num_channels, embed_size, dropout=True): """Stacked attention Module """ super(AttentionNew, self).__init__() self.ff_image = nn.Linear(embed_size, num_channels) self.ff_questions = nn.Linear(embed_size, num_channels) self.dropout = nn.Dropout(p=0.5) self.ff_attention = nn.Linear(num_channels, 1) def forward(self, input_0, input_1): primals_1 = self.ff_image.weight primals_2 = self.ff_image.bias primals_4 = self.ff_questions.weight primals_5 = self.ff_questions.bias primals_7 = self.ff_attention.weight primals_8 = self.ff_attention.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
ahmed563/Group14_Project_DLSpring2021
Attention
false
3,050
[ "MIT" ]
0
d2b03555cadb483ae472b613f107173f89c07d9b
https://github.com/ahmed563/Group14_Project_DLSpring2021/tree/d2b03555cadb483ae472b613f107173f89c07d9b
Net
import torch
import torch.nn as nn
import torch.nn.functional as F


def set_init(layers):
    for layer in layers:
        nn.init.normal_(layer.weight, mean=0.0, std=0.1)
        nn.init.constant_(layer.bias, 0.0)


class Net(nn.Module):

    def __init__(self, s_dim, a_dim, hidden=16):
        super(Net, self).__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim
        self.pi1 = nn.Linear(s_dim, hidden)
        self.pi2 = nn.Linear(hidden, a_dim)
        self.v1 = nn.Linear(s_dim, hidden)
        self.v2 = nn.Linear(hidden, 1)
        set_init([self.pi1, self.pi2, self.v1, self.v2])
        self.distribution = torch.distributions.Categorical
        self.initialized = True

    def forward(self, x):
        x = torch.flatten(x, start_dim=1)
        pi1 = torch.tanh(self.pi1(x))
        logits = self.pi2(pi1)
        v1 = torch.tanh(self.v1(x))
        values = self.v2(v1)
        return logits, values

    def choose_action(self, s):
        self.eval()
        logits, _ = self.forward(s)
        prob = F.softmax(logits, dim=1).data
        m = self.distribution(prob)
        return m.sample().numpy()[0]

    def loss_func(self, s, a, v_t):
        self.train()
        logits, values = self.forward(s)
        td = v_t - values
        c_loss = td.pow(2)
        probs = F.softmax(logits, dim=1)
        m = self.distribution(probs)
        exp_v = m.log_prob(a) * td.detach().squeeze()
        a_loss = -exp_v
        total_loss = (c_loss + a_loss).mean()
        return total_loss


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'s_dim': 4, 'a_dim': 4}]
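The sketch below is an illustrative addition, not original code: the two-headed network returns policy logits and a state value, and choose_action samples an action index from the softmax over those logits. CPU tensors are assumed, since choose_action calls .numpy().

import torch

# Sketch only: assumes the Net class defined above is in scope.
net = Net(s_dim=4, a_dim=4)
s = torch.rand(4, 4)                       # batch of 4 flattened states
logits, values = net(s)
assert logits.shape == (4, 4)              # one logit per action
assert values.shape == (4, 1)              # one state value per sample
action = net.choose_action(torch.rand(1, 4))   # single-state rollout step
assert 0 <= int(action) < 4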
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (4, 16), (16, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (1, 16), (16, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf3) del primals_6 buf4 = buf3 del buf3 triton_poi_fused_tanh_0[grid(64)](buf4, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (16, 1), (1, 16), 0), alpha=1, beta=1, out=buf6) del primals_9 return buf2, buf6, primals_1, buf1, buf4, primals_8, primals_4 def set_init(layers): for layer in layers: nn.init.normal_(layer.weight, mean=0.0, std=0.1) nn.init.constant_(layer.bias, 0.0) class NetNew(nn.Module): def __init__(self, s_dim, a_dim, hidden=16): super(NetNew, self).__init__() self.s_dim = s_dim self.a_dim = a_dim self.pi1 = nn.Linear(s_dim, hidden) self.pi2 = nn.Linear(hidden, a_dim) self.v1 = nn.Linear(s_dim, hidden) self.v2 = nn.Linear(hidden, 1) set_init([self.pi1, self.pi2, self.v1, self.v2]) self.distribution = torch.distributions.Categorical self.initialized = True def choose_action(self, s): self.eval() logits, _ = self.forward(s) prob = F.softmax(logits, dim=1).data m = self.distribution(prob) return m.sample().numpy()[0] def loss_func(self, s, a, v_t): self.train() logits, values = self.forward(s) td = v_t - values c_loss = td.pow(2) probs = F.softmax(logits, dim=1) m = self.distribution(probs) exp_v = m.log_prob(a) * td.detach().squeeze() a_loss = -exp_v 
total_loss = (c_loss + a_loss).mean() return total_loss def forward(self, input_0): primals_2 = self.pi1.weight primals_3 = self.pi1.bias primals_4 = self.pi2.weight primals_5 = self.pi2.bias primals_6 = self.v1.weight primals_7 = self.v1.bias primals_8 = self.v2.weight primals_9 = self.v2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
aivaslab/marlgrid
Net
false
3,051
[ "Apache-2.0" ]
0
10b53d27ce224fadeeb5830d6034350a69feb4b4
https://github.com/aivaslab/marlgrid/tree/10b53d27ce224fadeeb5830d6034350a69feb4b4
Actor
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F


class Actor(nn.Module):

    def __init__(self, input_dim, output_dim):
        super(Actor, self).__init__()
        self.layer1 = nn.Linear(input_dim, output_dim, bias=True)

    def _format(self, state):
        x = state
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x, dtype=torch.float32)
            x = x.unsqueeze(0)
        return x

    def forward(self, a, p):
        input = self._format(torch.cat((a, p), dim=1))
        output = self.layer1(input)
        output = torch.clamp(output, 0, 1)
        output = F.normalize(output, dim=0)
        return output

    def full_pass(self, a, p):
        logits = self.forward(a, p)
        dist = torch.distributions.Categorical(logits=logits)
        action = dist.sample()
        logpa = dist.log_prob(action).unsqueeze(-1)
        entropy = dist.entropy().unsqueeze(-1)
        is_exploratory = action != np.argmax(logits.detach().numpy())
        return action.item(), is_exploratory.item(), logpa, entropy

    def select_action(self, a, p):
        logits = self.forward(a, p)
        dist = torch.distributions.Categorical(logits=logits)
        action = dist.sample()
        return action.item()

    def select_greedy_action(self, a, p):
        logits = self.forward(a, p)
        return np.argmax(logits.detach().numpy())


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'output_dim': 4}]
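A brief usage sketch added here (not in the original repository): forward concatenates the two inputs along dim=1, applies the single linear layer to the last dimension, clamps to [0, 1], and L2-normalizes along dim=0, so every output entry stays within [0, 1].

import torch

# Sketch only: assumes the Actor class defined above is in scope.
actor = Actor(input_dim=4, output_dim=4)
a = torch.rand(4, 4, 4, 4)
p = torch.rand(4, 4, 4, 4)
out = actor(a, p)
assert out.shape == (4, 8, 4, 4)                    # concatenation doubles dim=1
assert float(out.min()) >= 0.0 and float(out.max()) <= 1.0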
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_clamp_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (256 + x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (384 + x0), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = triton_helpers.minimum(tmp6, tmp3) tmp8 = tmp7 * tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = triton_helpers.minimum(tmp10, tmp3) tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp15 = triton_helpers.maximum(tmp14, tmp1) tmp16 = triton_helpers.minimum(tmp15, tmp3) tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp20 = triton_helpers.maximum(tmp19, tmp1) tmp21 = triton_helpers.minimum(tmp20, tmp3) tmp22 = tmp21 * tmp21 tmp23 = tmp18 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-12 tmp26 = triton_helpers.maximum(tmp24, tmp25) tmp27 = tmp4 / tmp26 tl.store(out_ptr0 + x2, tmp27, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_clamp_div_1[grid(512)](buf1, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) return buf2, 
reinterpret_tensor(buf0, (128, 4), (4, 1), 0), buf1 class ActorNew(nn.Module): def __init__(self, input_dim, output_dim): super(ActorNew, self).__init__() self.layer1 = nn.Linear(input_dim, output_dim, bias=True) def _format(self, state): x = state if not isinstance(x, torch.Tensor): x = torch.tensor(x, dtype=torch.float32) x = x.unsqueeze(0) return x def full_pass(self, a, p): logits = self.forward(a, p) dist = torch.distributions.Categorical(logits=logits) action = dist.sample() logpa = dist.log_prob(action).unsqueeze(-1) entropy = dist.entropy().unsqueeze(-1) is_exploratory = action != np.argmax(logits.detach().numpy()) return action.item(), is_exploratory.item(), logpa, entropy def select_action(self, a, p): logits = self.forward(a, p) dist = torch.distributions.Categorical(logits=logits) action = dist.sample() return action.item() def select_greedy_action(self, a, p): logits = self.forward(a, p) return np.argmax(logits.detach().numpy()) def forward(self, input_0, input_1): primals_3 = self.layer1.weight primals_4 = self.layer1.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
ajy8456/active-recognition
Actor
false
3,052
[ "MIT" ]
0
7d3a4bbfceaf5fb32cd43f62636f36a10ab63807
https://github.com/ajy8456/active-recognition/tree/7d3a4bbfceaf5fb32cd43f62636f36a10ab63807
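A short eager-mode sketch of the Actor defined in this row, using only the shapes from get_inputs() and get_init_inputs() as assumptions:

import torch

# cat((a, p), dim=1) turns two (4, 4, 4, 4) tensors into (4, 8, 4, 4);
# Linear(4, 4) then acts on the last dimension, followed by clamp to [0, 1]
# and F.normalize along dim=0.
actor = Actor(input_dim=4, output_dim=4)
a, p = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out = actor(a, p)
assert out.shape == (4, 8, 4, 4)
assert out.min() >= 0.0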
ContractingBlock
import torch
from torch import nn
from torch.nn import functional as F


class ContractingBlock(nn.Module):
    """
    ContractingBlock Class
    Performs two convolutions followed by a max pool operation.
    Values:
        input_channels: the number of channels to expect from a given input
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv3x3_0 = nn.Conv2d(in_channels, out_channels, kernel_size=3)
        self.conv3x3_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3)

    def forward(self, x):
        """
        Function for completing a forward pass of ContractingBlock:
        Given an image tensor, completes a contracting block and returns the
        transformed tensor.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
        """
        fx = F.relu(self.conv3x3_0(x))
        fx = F.relu(self.conv3x3_1(fx))
        return fx


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 61504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 3600 % 4 x0 = xindex % 3600 x3 = xindex // 3600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x0 + 3712 * x3), tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 62, 62), (15376, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(61504)](buf1, primals_2, 61504, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 60, 60), (14400, 3600, 60, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 60, 60), (14848, 3712, 60, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(57600)]( buf3, primals_5, buf4, 57600, XBLOCK=512, num_warps=4, num_stages=1 ) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf4 class ContractingBlockNew(nn.Module): """ ContractingBlock Class Performs two convolutions followed by a max pool operation. 
Values: input_channels: the number of channels to expect from a given input """ def __init__(self, in_channels, out_channels): super().__init__() self.conv3x3_0 = nn.Conv2d(in_channels, out_channels, kernel_size=3) self.conv3x3_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3) def forward(self, input_0): primals_1 = self.conv3x3_0.weight primals_2 = self.conv3x3_0.bias primals_4 = self.conv3x3_1.weight primals_5 = self.conv3x3_1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
akanametov/unet-pytorch
ContractingBlock
false
3,054
[ "MIT" ]
0
6cf0f70674958356ea4ac36fe61b0415921f72ae
https://github.com/akanametov/unet-pytorch/tree/6cf0f70674958356ea4ac36fe61b0415921f72ae
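Although the docstring mentions a max pool, forward above only applies the two unpadded 3x3 convolutions, so each spatial dimension shrinks by 4; a quick shape check using the row's get_inputs() shapes as an assumed example:

import torch

block = ContractingBlock(in_channels=4, out_channels=4)
out = block(torch.rand(4, 4, 64, 64))
# Matches the (4, 4, 60, 60) buffer asserted in the compiled call() above.
assert out.shape == (4, 4, 60, 60)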
SqueezeExcitation
import torch import torch.nn as nn import torch.nn.functional as F def make_divisible(v, divisor=8, min_value=None): """ The channel number of each layer should be divisable by 8. The function is taken from github.com/rwightman/pytorch-image-models/master/timm/models/layers/helpers.py """ min_value = min_value or divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < 0.9 * v: new_v += divisor return new_v class SqueezeExcitation(nn.Module): def __init__(self, in_channels: 'int', reduction: 'int'=4, out_channels: 'int'=-1, **kwargs: dict): super(SqueezeExcitation, self).__init__() assert in_channels > 0 num_reduced_channels = make_divisible(max(out_channels, 8) // reduction, 8) self.fc1 = nn.Conv2d(in_channels, num_reduced_channels, kernel_size=1) self.fc2 = nn.Conv2d(num_reduced_channels, in_channels, kernel_size=1) self.activation = nn.ReLU(inplace=True) self.sigmoid = nn.Sigmoid() def forward(self, inp): x = F.adaptive_avg_pool2d(inp, 1) x = self.activation(self.fc1(x)) x = self.sigmoid(self.fc2(x)) return x + inp def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 + tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(32)](buf3, primals_3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_sigmoid_3[grid(256)](buf5, primals_1, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf6, primals_2, primals_4, buf1, buf3, buf5 def make_divisible(v, divisor=8, min_value=None): """ The channel number of each layer should be divisable by 8. The function is taken from github.com/rwightman/pytorch-image-models/master/timm/models/layers/helpers.py """ min_value = min_value or divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < 0.9 * v: new_v += divisor return new_v class SqueezeExcitationNew(nn.Module): def __init__(self, in_channels: 'int', reduction: 'int'=4, out_channels: 'int'=-1, **kwargs: dict): super(SqueezeExcitationNew, self).__init__() assert in_channels > 0 num_reduced_channels = make_divisible(max(out_channels, 8) // reduction, 8) self.fc1 = nn.Conv2d(in_channels, num_reduced_channels, kernel_size=1) self.fc2 = nn.Conv2d(num_reduced_channels, in_channels, kernel_size=1) self.activation = nn.ReLU(inplace=True) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
akashAD98/EfficientNetv2-with-Detectron2
SqueezeExcitation
false
3,055
[ "Apache-2.0" ]
0
1ba7f32cda31550ed4a040c15271612fa3f73d74
https://github.com/akashAD98/EfficientNetv2-with-Detectron2/tree/1ba7f32cda31550ed4a040c15271612fa3f73d74
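A sketch of the squeeze-width arithmetic and the residual-style output used above, under the row's default arguments (out_channels=-1, reduction=4); the only assumptions are the shapes from get_inputs():

import torch

# make_divisible(max(-1, 8) // 4, 8) == make_divisible(2, 8) == 8, so fc1
# squeezes to 8 channels here regardless of out_channels.
assert make_divisible(max(-1, 8) // 4, 8) == 8

se = SqueezeExcitation(in_channels=4)
x = torch.rand(4, 4, 4, 4)
out = se(x)                 # note the sigmoid gate is added to the input
assert out.shape == x.shape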
SimpleGCN
import math import torch import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter import torch.nn import torch.autograd class SimpleGCN(nn.Module): """A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 .. note:: If you use this code, please cite the original paper in addition to Kaolin. .. code-block:: @article{kipf2016semi, title={Semi-Supervised Classification with Graph Convolutional Networks}, author={Kipf, Thomas N and Welling, Max}, journal={arXiv preprint arXiv:1609.02907}, year={2016} } """ def __init__(self, in_features, out_features, bias=True): super(SimpleGCN, self).__init__() self.in_features = in_features self.out_features = out_features self.weight1 = Parameter(torch.Tensor(in_features, out_features)) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 6.0 / math.sqrt(self.weight1.size(1) + self.weight1.size(0)) stdv *= 0.6 self.weight1.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-0.1, 0.1) def forward(self, input, adj): support = torch.mm(input, self.weight1) side_len = max(support.shape[1] // 3, 2) if adj.type() == 'torch.cuda.sparse.FloatTensor': norm = torch.sparse.mm(adj, torch.ones((support.shape[0], 1))) normalized_support = support[:, :side_len] / norm side_1 = torch.sparse.mm(adj, normalized_support) else: side_1 = torch.mm(adj, support[:, :side_len]) side_2 = support[:, side_len:] output = torch.cat((side_1, side_2), dim=1) if self.bias is not None: output = output + self.bias return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter import torch.nn import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp11 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr1 + (2 + 4 * x1 + (-2 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp12 = tmp10 + tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 2), (4, 1 ), 0), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_cat_0[grid(16)](buf1, buf0, primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del buf1 del primals_4 return buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class SimpleGCNNew(nn.Module): """A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 .. note:: If you use this code, please cite the original paper in addition to Kaolin. .. 
code-block:: @article{kipf2016semi, title={Semi-Supervised Classification with Graph Convolutional Networks}, author={Kipf, Thomas N and Welling, Max}, journal={arXiv preprint arXiv:1609.02907}, year={2016} } """ def __init__(self, in_features, out_features, bias=True): super(SimpleGCNNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight1 = Parameter(torch.Tensor(in_features, out_features)) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 6.0 / math.sqrt(self.weight1.size(1) + self.weight1.size(0)) stdv *= 0.6 self.weight1.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-0.1, 0.1) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def forward(self, input_0, input_1): primals_1 = self.weight1 primals_4 = self.bias primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
akashgokul/kaolin
SimpleGCN
false
3,056
[ "ECL-2.0", "Apache-2.0" ]
0
6360c4f2bcdd81f461dfb4d96267e79d89d5e112
https://github.com/akashgokul/kaolin/tree/6360c4f2bcdd81f461dfb4d96267e79d89d5e112
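A dense-adjacency sketch matching this row's get_inputs(); the sparse branch is skipped because torch.rand produces a dense tensor (illustrative only):

import torch

# support = input @ weight1 has out_features columns; side_len =
# max(4 // 3, 2) == 2, so the first two columns are propagated through adj,
# the remaining columns pass through unchanged, and the bias is added last.
gcn = SimpleGCN(in_features=4, out_features=4)
x, adj = torch.rand(4, 4), torch.rand(4, 4)
out = gcn(x, adj)
assert out.shape == (4, 4)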
Discriminator
import torch
from torch import nn


class Discriminator(nn.Module):

    def __init__(self, in_features=1):
        super(Discriminator, self).__init__()
        self.conv1 = nn.Conv2d(in_features, 96, kernel_size=7, stride=4)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(96, 64, kernel_size=5, stride=2)
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
        self.conv3 = nn.Conv2d(64, 32, kernel_size=3, stride=2)
        self.conv4 = nn.Conv2d(32, 32, kernel_size=1, stride=1)
        self.conv5 = nn.Conv2d(32, 2, kernel_size=1, stride=1)

    def forward(self, x):
        out = self.conv1(x)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.relu(out)
        out = self.max_pool(out)
        out = self.conv3(out)
        out = self.relu(out)
        out = self.conv4(out)
        out = self.relu(out)
        out = self.conv5(out)
        out = self.relu(out)
        return out.view(out.size(0), -1, 2)


def get_inputs():
    return [torch.rand([4, 1, 128, 128])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 369024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 961 % 96 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 196 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (14 + 2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (15 + 2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (16 + 2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (28 + 2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (29 + 2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (30 + 2 * x0 + 28 * x1 + 196 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = 
tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x3, tmp16, xmask) tl.store(out_ptr1 + x3, tmp41, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (96, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_2, (96,), (1,)) assert_size_stride(primals_3, (4, 1, 128, 128), (16384, 16384, 128, 1)) assert_size_stride(primals_4, (64, 96, 5, 5), (2400, 25, 5, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (2, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_11, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 31, 31), (92256, 961, 31, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(369024)](buf1, primals_2, 369024, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 14, 14), (12544, 196, 14, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(50176)](buf3, primals_5, 50176, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch. 
float32) buf5 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_2[grid(9216)](buf3, buf4, buf5, 9216, XBLOCK=128, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 2, 2), (128, 4, 2, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_3[grid(512)](buf7, primals_7, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 2, 2), (128, 4, 2, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_3[grid(512)](buf9, primals_9, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 2, 2, 2), (8, 4, 2, 1)) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 2, 2, 2), (8, 4, 2, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_4[grid(32)](buf11, primals_11, buf12, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_11 return (reinterpret_tensor(buf11, (4, 4, 2), (8, 2, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf4, buf5, buf7, buf9, buf12) class DiscriminatorNew(nn.Module): def __init__(self, in_features=1): super(DiscriminatorNew, self).__init__() self.conv1 = nn.Conv2d(in_features, 96, kernel_size=7, stride=4) self.relu = nn.ReLU() self.conv2 = nn.Conv2d(96, 64, kernel_size=5, stride=2) self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2) self.conv3 = nn.Conv2d(64, 32, kernel_size=3, stride=2) self.conv4 = nn.Conv2d(32, 32, kernel_size=1, stride=1) self.conv5 = nn.Conv2d(32, 2, kernel_size=1, stride=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.conv5.weight primals_11 = self.conv5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
ajdillhoff/simgan-pytorch
Discriminator
false
3,057
[ "MIT" ]
0
fb61241a85136aae770944e1496f9319df327561
https://github.com/ajdillhoff/simgan-pytorch/tree/fb61241a85136aae770944e1496f9319df327561
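A shape walk-through for the default in_features=1 and the 128x128 input from get_inputs(), written as an assumed eager-mode check:

import torch

# 128 -> 31 (7x7 conv, stride 4) -> 14 (5x5 conv, stride 2) -> 6 (3x3 max
# pool, stride 2) -> 2 (3x3 conv, stride 2) -> 2 -> 2, and then
# view(batch, -1, 2) yields (4, 4, 2) logit pairs.
disc = Discriminator(in_features=1)
out = disc(torch.rand(4, 1, 128, 128))
assert out.shape == (4, 4, 2)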
Encoder
import torch
import torch.nn as nn
import torch.nn.functional as F


class Encoder(nn.Module):

    def __init__(self, input_shape, output_shape):
        super(Encoder, self).__init__()
        self.input_shape = input_shape
        self.encoder_out_shape = output_shape
        self.linear_one = nn.Linear(self.input_shape, 400)
        self.linear_two = nn.Linear(400, 200)
        self.linear_three = nn.Linear(200, 100)
        self.linear_four = nn.Linear(100, self.encoder_out_shape)

    def forward(self, x):
        x = F.relu(self.linear_one(x))
        x = F.relu(self.linear_two(x))
        x = F.relu(self.linear_three(x))
        x = F.relu(self.linear_four(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_shape': 4, 'output_shape': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 400), (400, 1)) assert_size_stride(primals_5, (200,), (1,)) assert_size_stride(primals_6, (100, 200), (200, 1)) assert_size_stride(primals_7, (100,), (1,)) assert_size_stride(primals_8, (4, 100), (100, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 
= empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf0 buf11 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1, primals_2, buf11, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 200), (1, 400), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 200), (3200, 800, 200, 1), 0) del buf2 buf10 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(12800)](buf3, primals_5, buf10, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 100), (1, 200), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf4 buf9 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(6400)](buf5, primals_7, buf9, 6400, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 100), (100, 1), 0), reinterpret_tensor(primals_8, (100, 4), (1, 100), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(256)](buf7, primals_9, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 400), (400, 1), 0 ), reinterpret_tensor(buf3, (64, 200), (200, 1), 0 ), reinterpret_tensor(buf5, (64, 100), (100, 1), 0 ), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11 class EncoderNew(nn.Module): def __init__(self, input_shape, output_shape): super(EncoderNew, self).__init__() self.input_shape = input_shape self.encoder_out_shape = output_shape self.linear_one = nn.Linear(self.input_shape, 400) self.linear_two = nn.Linear(400, 200) self.linear_three = nn.Linear(200, 100) self.linear_four = nn.Linear(100, self.encoder_out_shape) def forward(self, input_0): primals_1 = self.linear_one.weight primals_2 = self.linear_one.bias primals_4 = self.linear_two.weight primals_5 = self.linear_two.bias primals_6 = self.linear_three.weight primals_7 = self.linear_three.bias primals_8 = self.linear_four.weight primals_9 = self.linear_four.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
akaprasanga/AutoEncoder
Encoder
false
3,058
[ "MIT" ]
0
a1562a7a720c199b717796e469b9957eb958264a
https://github.com/akaprasanga/AutoEncoder/tree/a1562a7a720c199b717796e469b9957eb958264a
Decoder
import torch
import torch.nn as nn
import torch.nn.functional as F


class Decoder(nn.Module):

    def __init__(self, input_shape, output_shape):
        super(Decoder, self).__init__()
        self.input_shape = input_shape
        self.decoder_out_shape = output_shape
        self.linear_one = nn.Linear(self.input_shape, 100)
        self.linear_two = nn.Linear(100, 200)
        self.linear_three = nn.Linear(200, 400)
        self.linear_four = nn.Linear(400, self.decoder_out_shape)

    def forward(self, x):
        x = F.relu(self.linear_one(x))
        x = F.relu(self.linear_two(x))
        x = F.relu(self.linear_three(x))
        x = self.linear_four(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_shape': 4, 'output_shape': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (200, 100), (100, 1)) assert_size_stride(primals_5, (200,), (1,)) assert_size_stride(primals_6, (400, 200), (200, 1)) assert_size_stride(primals_7, (400,), (1,)) assert_size_stride(primals_8, (4, 400), (400, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1, primals_2, buf9, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 200), (200, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 200), (1, 100), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 200), (3200, 800, 200, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(12800)](buf3, primals_5, buf8, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 400), (1, 200), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf4 buf7 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(25600)](buf5, primals_7, buf7, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 400), (400, 1), 0), reinterpret_tensor(primals_8, (400, 4), (1, 400), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 100), (100, 1), 0 ), reinterpret_tensor(buf3, (64, 200), (200, 1), 0 ), reinterpret_tensor(buf5, (64, 400), (400, 1), 0 ), primals_8, buf7, primals_6, buf8, primals_4, buf9 class DecoderNew(nn.Module): def __init__(self, input_shape, output_shape): super(DecoderNew, self).__init__() self.input_shape = input_shape self.decoder_out_shape = output_shape self.linear_one = nn.Linear(self.input_shape, 100) self.linear_two = nn.Linear(100, 200) self.linear_three = nn.Linear(200, 400) self.linear_four = nn.Linear(400, self.decoder_out_shape) def forward(self, input_0): primals_1 = self.linear_one.weight primals_2 = self.linear_one.bias primals_4 = self.linear_two.weight primals_5 = self.linear_two.bias primals_6 = self.linear_three.weight primals_7 = self.linear_three.bias primals_8 = self.linear_four.weight primals_9 = self.linear_four.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
akaprasanga/AutoEncoder
Decoder
false
3,059
[ "MIT" ]
0
a1562a7a720c199b717796e469b9957eb958264a
https://github.com/akaprasanga/AutoEncoder/tree/a1562a7a720c199b717796e469b9957eb958264a
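A round-trip sketch pairing this Decoder with the Encoder from the previous row (both rows use input_shape=4 and output_shape=4 in get_init_inputs()); the pairing itself is an assumption made for illustration:

import torch

# The two MLPs mirror each other (4-400-200-100-4 and 4-100-200-400-4) and
# act on the last dimension, so the reconstruction keeps the input shape.
enc = Encoder(input_shape=4, output_shape=4)
dec = Decoder(input_shape=4, output_shape=4)
x = torch.rand(4, 4, 4, 4)
recon = dec(enc(x))
assert recon.shape == x.shape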
Model
import torch
from torch import nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, input, hidden, output):
        super(Model, self).__init__()
        self.l1 = nn.Linear(input, hidden)
        self.l2 = nn.Linear(hidden, hidden)
        self.l3 = nn.Linear(hidden, 2)

    def forward(self, x):
        out = F.relu(self.l1(x))
        out = F.relu(self.l2(out))
        out = self.l3(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input': 4, 'hidden': 4, 'output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (2, 4), (4, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6 class ModelNew(nn.Module): def __init__(self, input, hidden, output): super(ModelNew, self).__init__() self.l1 = nn.Linear(input, hidden) self.l2 = nn.Linear(hidden, hidden) self.l3 = nn.Linear(hidden, 2) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
akapoorx00/machinelearning-stuff
Model
false
3,060
[ "Apache-2.0" ]
0
53184019b77d3387fd15b13d3bfa75529b8ed003
https://github.com/akapoorx00/machinelearning-stuff/tree/53184019b77d3387fd15b13d3bfa75529b8ed003
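Worth noting: l3 is hard-coded to 2 units, so the output argument is accepted but never used; a small sketch with the row's get_inputs() shapes as an assumed example:

import torch

model = Model(input=4, hidden=4, output=4)
out = model(torch.rand(4, 4, 4, 4))
# Trailing dimension is 2, matching the (4, 4, 4, 2) view in call() above.
assert out.shape == (4, 4, 4, 2)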
DiscShiftLoss
import torch
import torch.nn as nn


class DiscShiftLoss(nn.Module):
    """Disc shift loss.

    Args:
        loss_weight (float, optional): Loss weight. Defaults to 1.0.
    """

    def __init__(self, loss_weight=0.1):
        super().__init__()
        self.loss_weight = loss_weight

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Tensor with shape (n, c, h, w)

        Returns:
            Tensor: Loss.
        """
        loss = torch.mean(x ** 2)
        return loss * self.loss_weight


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tmp5 = 256.0 tmp6 = tmp4 / tmp5 tmp7 = 0.1 tmp8 = tmp6 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mul_pow_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class DiscShiftLossNew(nn.Module): """Disc shift loss. Args: loss_weight (float, optional): Loss weight. Defaults to 1.0. """ def __init__(self, loss_weight=0.1): super().__init__() self.loss_weight = loss_weight def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
akimotty877/mmediting
DiscShiftLoss
false
3,061
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
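The loss reduces to loss_weight * mean(x ** 2); a quick numerical check against the constants baked into the fused kernel above (1/256 for the mean over a (4, 4, 4, 4) input, 0.1 for the weight), written as an assumed example:

import torch

crit = DiscShiftLoss(loss_weight=0.1)
x = torch.rand(4, 4, 4, 4)
expected = 0.1 * (x ** 2).mean()
assert torch.allclose(crit(x), expected)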
Net
import torch
from torch import nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self, input, hidden, output):
        super(Net, self).__init__()
        self.l1 = nn.Linear(input, hidden)
        self.l2 = nn.Linear(hidden, hidden)
        self.l3 = nn.Linear(hidden, hidden)
        self.l4 = nn.Linear(hidden, hidden)
        self.l5 = nn.Linear(hidden, output)

    def forward(self, x):
        out = F.relu(self.l1(x))
        out = F.relu(self.l2(out))
        out = F.relu(self.l3(out))
        out = F.relu(self.l4(out))
        out = F.log_softmax(self.l5(out), dim=1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input': 4, 'hidden': 4, 'output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5, primals_7, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf7, primals_9, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_11 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf8, buf9, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__log_softmax_2[grid(256)](buf9, buf10, 256, XBLOCK =256, num_warps=4, num_stages=1) del buf9 return (buf10, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf10, primals_10, buf11, primals_8, buf12, primals_6, buf13, primals_4, buf14) class NetNew(nn.Module): def __init__(self, input, hidden, output): super(NetNew, self).__init__() self.l1 = nn.Linear(input, hidden) self.l2 = nn.Linear(hidden, hidden) self.l3 = nn.Linear(hidden, hidden) self.l4 = nn.Linear(hidden, hidden) self.l5 = nn.Linear(hidden, output) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_8 = self.l4.weight primals_9 = self.l4.bias primals_10 = self.l5.weight primals_11 = self.l5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
akapoorx00/machinelearning-stuff
Net
false
3,062
[ "Apache-2.0" ]
0
53184019b77d3387fd15b13d3bfa75529b8ed003
https://github.com/akapoorx00/machinelearning-stuff/tree/53184019b77d3387fd15b13d3bfa75529b8ed003
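The Net record above fuses each Linear + ReLU (caching the <= 0 mask for the backward pass) and splits the final log-softmax into two element-wise kernels: a max-subtraction pass and a log-sum-exp pass over dim=1. A minimal eager sketch of that two-pass, numerically stable log-softmax, shown only for reference (shapes follow the record's (4, 4, 4, 4) inputs; the function name is illustrative):

import torch

def two_pass_log_softmax(x, dim=1):
    # Pass 1 mirrors triton_poi_fused__log_softmax_1: subtract the per-slice maximum.
    shifted = x - x.amax(dim=dim, keepdim=True)
    # Pass 2 mirrors triton_poi_fused__log_softmax_2: subtract the log of the summed exponentials.
    return shifted - shifted.exp().sum(dim=dim, keepdim=True).log()

x = torch.randn(4, 4, 4, 4)
print(torch.allclose(two_pass_log_softmax(x), torch.log_softmax(x, dim=1), atol=1e-6))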
CharbonnierCompLoss
import functools import torch import torch.nn as nn from torch.nn import functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss if reduction_enum == 1: return loss.mean() return loss.sum() def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. Default: None. reduction (str): Same as built-in losses of PyTorch. Options are "none", "mean" and "sum". Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if weight is None or reduction == 'sum': loss = reduce_loss(loss, reduction) elif reduction == 'mean': if weight.size(1) == 1: weight = weight.expand_as(loss) eps = 1e-12 if sample_wise: weight = weight.sum(dim=[1, 2, 3], keepdim=True) loss = (loss / (weight + eps)).sum() / weight.size(0) else: loss = loss.sum() / (weight.sum() + eps) return loss def masked_loss(loss_func): """Create a masked version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @masked_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', sample_wise= False, **kwargs): loss = loss_func(pred, target, **kwargs) loss = mask_reduce_loss(loss, weight, reduction, sample_wise) return loss return wrapper @masked_loss def charbonnier_loss(pred, target, eps=1e-12): """Charbonnier loss. Args: pred (Tensor): Prediction Tensor with shape (n, c, h, w). target ([type]): Target Tensor with shape (n, c, h, w). Returns: Tensor: Calculated Charbonnier loss. """ return torch.sqrt((pred - target) ** 2 + eps) class CharbonnierCompLoss(nn.Module): """Charbonnier composition loss. Args: loss_weight (float): Loss weight for L1 loss. Default: 1.0. reduction (str): Specifies the reduction to apply to the output. Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. 
It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. eps (float): A value used to control the curvature near zero. Default: 1e-12. """ def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False, eps=1e-12): super().__init__() if reduction not in ['none', 'mean', 'sum']: raise ValueError( f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}' ) self.loss_weight = loss_weight self.reduction = reduction self.sample_wise = sample_wise self.eps = eps def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs): """ Args: pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte. fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object. bg (Tensor): of shape (N, 3, H, W). Tensor of background object. ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin merged image before normalized by ImageNet mean and std. weight (Tensor, optional): of shape (N, 1, H, W). It is an indicating matrix: weight[trimap == 128] = 1. Default: None. """ pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg if weight is not None: weight = weight.expand(-1, 3, -1, -1) return self.loss_weight * charbonnier_loss(pred_merged, ori_merged, weight, eps=self.eps, reduction=self.reduction, sample_wise= self.sample_wise) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import functools import torch.nn as nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp5 = tl.load(in_ptr2 + r0, None) tmp8 = tl.load(in_ptr3 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = 1e-12 tmp12 = tmp10 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tmp19 = tmp18 * tmp3 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0[grid(1)](buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss if reduction_enum == 1: return loss.mean() return loss.sum() def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. Default: None. reduction (str): Same as built-in losses of PyTorch. Options are "none", "mean" and "sum". Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. Returns: Tensor: Processed loss values. 
""" if weight is not None: assert weight.dim() == loss.dim() assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if weight is None or reduction == 'sum': loss = reduce_loss(loss, reduction) elif reduction == 'mean': if weight.size(1) == 1: weight = weight.expand_as(loss) eps = 1e-12 if sample_wise: weight = weight.sum(dim=[1, 2, 3], keepdim=True) loss = (loss / (weight + eps)).sum() / weight.size(0) else: loss = loss.sum() / (weight.sum() + eps) return loss def masked_loss(loss_func): """Create a masked version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @masked_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', sample_wise= False, **kwargs): loss = loss_func(pred, target, **kwargs) loss = mask_reduce_loss(loss, weight, reduction, sample_wise) return loss return wrapper @masked_loss def charbonnier_loss(pred, target, eps=1e-12): """Charbonnier loss. Args: pred (Tensor): Prediction Tensor with shape (n, c, h, w). target ([type]): Target Tensor with shape (n, c, h, w). Returns: Tensor: Calculated Charbonnier loss. """ return torch.sqrt((pred - target) ** 2 + eps) class CharbonnierCompLossNew(nn.Module): """Charbonnier composition loss. Args: loss_weight (float): Loss weight for L1 loss. Default: 1.0. reduction (str): Specifies the reduction to apply to the output. Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. eps (float): A value used to control the curvature near zero. Default: 1e-12. """ def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False, eps=1e-12): super().__init__() if reduction not in ['none', 'mean', 'sum']: raise ValueError( f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}' ) self.loss_weight = loss_weight self.reduction = reduction self.sample_wise = sample_wise self.eps = eps def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
akimotty877/mmediting
CharbonnierCompLoss
false
3,063
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
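One thing to note in both the eager and generated CharbonnierCompLoss classes above: the error message interpolates _reduction_modes, which is never defined in the snippet, so an unsupported reduction would raise a NameError rather than the intended ValueError. Below is a small hedged sketch of the default path the fused kernel implements (weight=None, reduction='mean', loss_weight=1.0, eps=1e-12); the helper name is illustrative:

import torch

_reduction_modes = ['none', 'mean', 'sum']  # defined explicitly here; the snippet above references it without defining it

def charbonnier_comp_reference(pred_alpha, fg, bg, ori_merged, eps=1e-12, loss_weight=1.0):
    # Same arithmetic as triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0:
    # composite, Charbonnier distance sqrt(diff^2 + eps), global mean, weight scale.
    pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
    return loss_weight * torch.sqrt((pred_merged - ori_merged) ** 2 + eps).mean()

pred_alpha, fg, bg, ori_merged = (torch.rand(4, 4, 4, 4) for _ in range(4))
print(charbonnier_comp_reference(pred_alpha, fg, bg, ori_merged))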
BboxHead
import torch from torch import nn import torch.nn class BboxHead(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(BboxHead, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=( 1, 1), stride=1, padding=0) def forward(self, x): out = self.conv1x1(x) return out def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 48 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 12 y1 = yindex // 12 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 49152 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (12, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 12, 64, 64), (49152, 1, 768, 12)) buf2 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_1[grid(48, 4096)](buf1, primals_2, buf2, 48, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf1 del primals_2 return buf2, primals_1, buf0 class BboxHeadNew(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(BboxHeadNew, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=( 1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ZongqingHou/Pytorch_Retinaface
BboxHead
false
3,064
[ "MIT" ]
0
6284b7158a0d9d3d4a2cc267a393c21863a1b938
https://github.com/ZongqingHou/Pytorch_Retinaface/tree/6284b7158a0d9d3d4a2cc267a393c21863a1b938
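The generated BboxHead plan above is mostly a memory-layout transformation: triton_poi_fused_0 re-lays the NCHW input out in a channels-last order before handing the 1x1 convolution to the external kernel, and triton_poi_fused_convolution_1 folds the bias add into the conversion back to contiguous NCHW. A rough eager analogue of the same idea, offered only as a sketch (it relies on PyTorch's channels_last memory format rather than the hand-written relayout kernels):

import torch
from torch import nn

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Run the 1x1 conv in channels-last memory format, then return a contiguous NCHW tensor.
conv = nn.Conv2d(512, 12, kernel_size=1).to(device).to(memory_format=torch.channels_last)
x = torch.rand(4, 512, 64, 64, device=device).to(memory_format=torch.channels_last)
out = conv(x).contiguous()
print(out.shape)  # torch.Size([4, 12, 64, 64])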
MSECompositionLoss
import functools import torch import torch.nn as nn from torch.nn import functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss if reduction_enum == 1: return loss.mean() return loss.sum() def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. Default: None. reduction (str): Same as built-in losses of PyTorch. Options are "none", "mean" and "sum". Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if weight is None or reduction == 'sum': loss = reduce_loss(loss, reduction) elif reduction == 'mean': if weight.size(1) == 1: weight = weight.expand_as(loss) eps = 1e-12 if sample_wise: weight = weight.sum(dim=[1, 2, 3], keepdim=True) loss = (loss / (weight + eps)).sum() / weight.size(0) else: loss = loss.sum() / (weight.sum() + eps) return loss def masked_loss(loss_func): """Create a masked version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @masked_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', sample_wise= False, **kwargs): loss = loss_func(pred, target, **kwargs) loss = mask_reduce_loss(loss, weight, reduction, sample_wise) return loss return wrapper @masked_loss def mse_loss(pred, target): """MSE loss. Args: pred (Tensor): Prediction Tensor with shape (n, c, h, w). target ([type]): Target Tensor with shape (n, c, h, w). Returns: Tensor: Calculated MSE loss. """ return F.mse_loss(pred, target, reduction='none') class MSECompositionLoss(nn.Module): """MSE (L2) composition loss. Args: loss_weight (float): Loss weight for MSE loss. Default: 1.0. reduction (str): Specifies the reduction to apply to the output. Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. 
""" def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False): super().__init__() if reduction not in ['none', 'mean', 'sum']: raise ValueError( f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}' ) self.loss_weight = loss_weight self.reduction = reduction self.sample_wise = sample_wise def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs): """ Args: pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte. fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object. bg (Tensor): of shape (N, 3, H, W). Tensor of background object. ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin merged image before normalized by ImageNet mean and std. weight (Tensor, optional): of shape (N, 1, H, W). It is an indicating matrix: weight[trimap == 128] = 1. Default: None. """ pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg if weight is not None: weight = weight.expand(-1, 3, -1, -1) return self.loss_weight * mse_loss(pred_merged, ori_merged, weight, reduction=self.reduction, sample_wise=self.sample_wise) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import functools import torch.nn as nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mse_loss_mul_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp5 = tl.load(in_ptr2 + r0, None) tmp8 = tl.load(in_ptr3 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = tmp15 * tmp3 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_mse_loss_mul_rsub_0[grid(1)](buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss if reduction_enum == 1: return loss.mean() return loss.sum() def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. Default: None. reduction (str): Same as built-in losses of PyTorch. Options are "none", "mean" and "sum". Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if weight is None or reduction == 'sum': loss = reduce_loss(loss, reduction) elif reduction == 'mean': if weight.size(1) == 1: weight = weight.expand_as(loss) eps = 1e-12 if sample_wise: weight = weight.sum(dim=[1, 2, 3], keepdim=True) loss = (loss / (weight + eps)).sum() / weight.size(0) else: loss = loss.sum() / (weight.sum() + eps) return loss def masked_loss(loss_func): """Create a masked version of a given loss function. 
To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @masked_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', sample_wise= False, **kwargs): loss = loss_func(pred, target, **kwargs) loss = mask_reduce_loss(loss, weight, reduction, sample_wise) return loss return wrapper @masked_loss def mse_loss(pred, target): """MSE loss. Args: pred (Tensor): Prediction Tensor with shape (n, c, h, w). target ([type]): Target Tensor with shape (n, c, h, w). Returns: Tensor: Calculated MSE loss. """ return F.mse_loss(pred, target, reduction='none') class MSECompositionLossNew(nn.Module): """MSE (L2) composition loss. Args: loss_weight (float): Loss weight for MSE loss. Default: 1.0. reduction (str): Specifies the reduction to apply to the output. Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. """ def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False): super().__init__() if reduction not in ['none', 'mean', 'sum']: raise ValueError( f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}' ) self.loss_weight = loss_weight self.reduction = reduction self.sample_wise = sample_wise def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
akimotty877/mmediting
MSECompositionLoss
false
3,065
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
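The fused kernel in the MSECompositionLoss record covers only the default path (weight=None, reduction='mean'); the weighted branches of mask_reduce_loss above never reach the kernel. For reference, a small sketch of what those two weighted-mean branches compute, with hand-picked values so the sample-wise and global reductions differ:

import torch

loss = torch.arange(8.0).reshape(2, 1, 2, 2)   # element-wise loss, shape (N, 1, H, W)
weight = torch.tensor([1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]).reshape(2, 1, 2, 2)
eps = 1e-12
weighted = loss * weight
# sample_wise=True: per-sample weighted mean first, then mean over the batch
w_sum = weight.sum(dim=[1, 2, 3], keepdim=True)
sample_wise = (weighted / (w_sum + eps)).sum() / weight.size(0)
# sample_wise=False: one global weighted mean
global_mean = weighted.sum() / (weight.sum() + eps)
print(sample_wise.item(), global_mean.item())  # 3.25 and 4.0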
L1CompositionLoss
import functools import torch import torch.nn as nn from torch.nn import functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss if reduction_enum == 1: return loss.mean() return loss.sum() def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. Default: None. reduction (str): Same as built-in losses of PyTorch. Options are "none", "mean" and "sum". Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if weight is None or reduction == 'sum': loss = reduce_loss(loss, reduction) elif reduction == 'mean': if weight.size(1) == 1: weight = weight.expand_as(loss) eps = 1e-12 if sample_wise: weight = weight.sum(dim=[1, 2, 3], keepdim=True) loss = (loss / (weight + eps)).sum() / weight.size(0) else: loss = loss.sum() / (weight.sum() + eps) return loss def masked_loss(loss_func): """Create a masked version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @masked_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', sample_wise= False, **kwargs): loss = loss_func(pred, target, **kwargs) loss = mask_reduce_loss(loss, weight, reduction, sample_wise) return loss return wrapper @masked_loss def l1_loss(pred, target): """L1 loss. Args: pred (Tensor): Prediction Tensor with shape (n, c, h, w). target ([type]): Target Tensor with shape (n, c, h, w). Returns: Tensor: Calculated L1 loss. """ return F.l1_loss(pred, target, reduction='none') class L1CompositionLoss(nn.Module): """L1 composition loss. Args: loss_weight (float): Loss weight for L1 loss. Default: 1.0. reduction (str): Specifies the reduction to apply to the output. Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. 
""" def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False): super().__init__() if reduction not in ['none', 'mean', 'sum']: raise ValueError( f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}' ) self.loss_weight = loss_weight self.reduction = reduction self.sample_wise = sample_wise def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs): """ Args: pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte. fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object. bg (Tensor): of shape (N, 3, H, W). Tensor of background object. ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin merged image before normalized by ImageNet mean and std. weight (Tensor, optional): of shape (N, 1, H, W). It is an indicating matrix: weight[trimap == 128] = 1. Default: None. """ pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg if weight is not None: weight = weight.expand(-1, 3, -1, -1) return self.loss_weight * l1_loss(pred_merged, ori_merged, weight, reduction=self.reduction, sample_wise=self.sample_wise) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools import torch.nn as nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_mean_mul_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp5 = tl.load(in_ptr2 + r0, None) tmp8 = tl.load(in_ptr3 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tmp7 - tmp8 tmp10 = tl_math.abs(tmp9) tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = tmp15 * tmp3 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_mean_mul_rsub_sub_0[grid(1)](buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss if reduction_enum == 1: return loss.mean() return loss.sum() def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. Default: None. reduction (str): Same as built-in losses of PyTorch. Options are "none", "mean" and "sum". Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. Returns: Tensor: Processed loss values. 
""" if weight is not None: assert weight.dim() == loss.dim() assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if weight is None or reduction == 'sum': loss = reduce_loss(loss, reduction) elif reduction == 'mean': if weight.size(1) == 1: weight = weight.expand_as(loss) eps = 1e-12 if sample_wise: weight = weight.sum(dim=[1, 2, 3], keepdim=True) loss = (loss / (weight + eps)).sum() / weight.size(0) else: loss = loss.sum() / (weight.sum() + eps) return loss def masked_loss(loss_func): """Create a masked version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @masked_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', sample_wise= False, **kwargs): loss = loss_func(pred, target, **kwargs) loss = mask_reduce_loss(loss, weight, reduction, sample_wise) return loss return wrapper @masked_loss def l1_loss(pred, target): """L1 loss. Args: pred (Tensor): Prediction Tensor with shape (n, c, h, w). target ([type]): Target Tensor with shape (n, c, h, w). Returns: Tensor: Calculated L1 loss. """ return F.l1_loss(pred, target, reduction='none') class L1CompositionLossNew(nn.Module): """L1 composition loss. Args: loss_weight (float): Loss weight for L1 loss. Default: 1.0. reduction (str): Specifies the reduction to apply to the output. Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. sample_wise (bool): Whether calculate the loss sample-wise. This argument only takes effect when `reduction` is 'mean' and `weight` (argument of `forward()`) is not None. It will first reduces loss with 'mean' per-sample, and then it means over all the samples. Default: False. """ def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False): super().__init__() if reduction not in ['none', 'mean', 'sum']: raise ValueError( f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}' ) self.loss_weight = loss_weight self.reduction = reduction self.sample_wise = sample_wise def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
akimotty877/mmediting
L1CompositionLoss
false
3,066
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
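As with the two composition losses above, the L1CompositionLoss kernel fuses compositing, the absolute difference, and the global mean into one reduction. A one-function eager reference for the path it covers (weight=None, reduction='mean'); the function name is illustrative:

import torch

def l1_comp_reference(pred_alpha, fg, bg, ori_merged, loss_weight=1.0):
    # Mirrors triton_per_fused_abs_add_mean_mul_rsub_sub_0: composite, |diff|, mean, weight scale.
    pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
    return loss_weight * (pred_merged - ori_merged).abs().mean()

pred_alpha, fg, bg, ori_merged = (torch.rand(4, 4, 4, 4) for _ in range(4))
print(l1_comp_reference(pred_alpha, fg, bg, ori_merged))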
CustomBatchNormManualModule
import torch import torch.nn as nn class CustomBatchNormManualFunction(torch.autograd.Function): """ This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs. Using torch.autograd.Function allows you to write a custom backward function. The function will be called from the nn.Module CustomBatchNormManualModule Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward pass is done via the backward method. The forward pass is not called directly but via the apply() method. This makes sure that the context objects are dealt with correctly. Example: my_bn_fct = CustomBatchNormManualFunction() normalized = fct.apply(input, gamma, beta, eps) """ @staticmethod def forward(ctx, input, gamma, beta, eps=1e-05): """ Compute the batch normalization Args: ctx: context object handling storing and retrival of tensors and constants and specifying whether tensors need gradients in backward pass input: input tensor of shape (n_batch, n_neurons) gamma: variance scaling tensor, applied per neuron, shpae (n_neurons) beta: mean bias tensor, applied per neuron, shpae (n_neurons) eps: small float added to the variance for stability Returns: out: batch-normalized tensor """ mu = torch.mean(input, 0) var = torch.var(input, dim=0, unbiased=False) x_hat = (input - mu) / (var + eps) ** 0.5 out = x_hat * gamma + beta ctx.save_for_backward(input, x_hat, out, gamma, beta, mu, var) return out @staticmethod def backward(ctx, grad_output): """ Compute backward pass of the batch normalization. Args: ctx: context object handling storing and retrival of tensors and constants and specifying whether tensors need gradients in backward pass Returns: out: tuple containing gradients for all input arguments """ x, x_hat, _y, gamma, _beta, _mu, var = ctx.saved_tensors grad_gamma = torch.sum(grad_output * x_hat, 0) if ctx.needs_input_grad[ 1] else None grad_beta = torch.sum(grad_output, 0) if ctx.needs_input_grad[2 ] else None grad_input = gamma * (1 / torch.sqrt(var)) / x.shape[0] * (x.shape[ 0] * grad_output - x_hat * grad_gamma - grad_beta ) if ctx.needs_input_grad[0] else None return grad_input, grad_gamma, grad_beta, None class CustomBatchNormManualModule(nn.Module): """ This nn.module implements a custom version of the batch norm operation for MLPs. In self.forward the functional version CustomBatchNormManualFunction.forward is called. The automatic differentiation of PyTorch calls the backward method of this function in the backward pass. """ def __init__(self, n_neurons, eps=1e-05): """ Initializes CustomBatchNormManualModule object. Args: n_neurons: int specifying the number of neurons eps: small float to be added to the variance for stability """ super(CustomBatchNormManualModule, self).__init__() self.n_neurons = n_neurons self.eps = eps self.gamma = nn.Parameter(torch.ones(self.n_neurons)) self.beta = nn.Parameter(torch.zeros(self.n_neurons)) def forward(self, input): """ Compute the batch normalization via CustomBatchNormManualFunction Args: input: input tensor of shape (n_batch, n_neurons) Returns: out: batch-normalized tensor """ assert input.shape[1] == self.n_neurons, 'Input has incorrect shape' out = CustomBatchNormManualFunction().apply(input, self.gamma, self .beta, self.eps) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_neurons': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp21 / tmp8 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = tmp10 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp28 + tmp29 tl.store(out_ptr0 + x3, tmp30, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_pow_sub_var_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class CustomBatchNormManualFunction(torch.autograd.Function): """ This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs. Using torch.autograd.Function allows you to write a custom backward function. The function will be called from the nn.Module CustomBatchNormManualModule Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward pass is done via the backward method. The forward pass is not called directly but via the apply() method. This makes sure that the context objects are dealt with correctly. 
Example: my_bn_fct = CustomBatchNormManualFunction() normalized = fct.apply(input, gamma, beta, eps) """ @staticmethod def forward(ctx, input, gamma, beta, eps=1e-05): """ Compute the batch normalization Args: ctx: context object handling storing and retrival of tensors and constants and specifying whether tensors need gradients in backward pass input: input tensor of shape (n_batch, n_neurons) gamma: variance scaling tensor, applied per neuron, shpae (n_neurons) beta: mean bias tensor, applied per neuron, shpae (n_neurons) eps: small float added to the variance for stability Returns: out: batch-normalized tensor """ mu = torch.mean(input, 0) var = torch.var(input, dim=0, unbiased=False) x_hat = (input - mu) / (var + eps) ** 0.5 out = x_hat * gamma + beta ctx.save_for_backward(input, x_hat, out, gamma, beta, mu, var) return out @staticmethod def backward(ctx, grad_output): """ Compute backward pass of the batch normalization. Args: ctx: context object handling storing and retrival of tensors and constants and specifying whether tensors need gradients in backward pass Returns: out: tuple containing gradients for all input arguments """ x, x_hat, _y, gamma, _beta, _mu, var = ctx.saved_tensors grad_gamma = torch.sum(grad_output * x_hat, 0) if ctx.needs_input_grad[ 1] else None grad_beta = torch.sum(grad_output, 0) if ctx.needs_input_grad[2 ] else None grad_input = gamma * (1 / torch.sqrt(var)) / x.shape[0] * (x.shape[ 0] * grad_output - x_hat * grad_gamma - grad_beta ) if ctx.needs_input_grad[0] else None return grad_input, grad_gamma, grad_beta, None class CustomBatchNormManualModuleNew(nn.Module): """ This nn.module implements a custom version of the batch norm operation for MLPs. In self.forward the functional version CustomBatchNormManualFunction.forward is called. The automatic differentiation of PyTorch calls the backward method of this function in the backward pass. """ def __init__(self, n_neurons, eps=1e-05): """ Initializes CustomBatchNormManualModule object. Args: n_neurons: int specifying the number of neurons eps: small float to be added to the variance for stability """ super(CustomBatchNormManualModuleNew, self).__init__() self.n_neurons = n_neurons self.eps = eps self.gamma = nn.Parameter(torch.ones(self.n_neurons)) self.beta = nn.Parameter(torch.zeros(self.n_neurons)) def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
akashrajkn/sarcastic-gradients
CustomBatchNormManualModule
false
3,067
[ "Apache-2.0" ]
0
5a995ab7822dfd49cdc88855c631dcc8f1b0532f
https://github.com/akashrajkn/sarcastic-gradients/tree/5a995ab7822dfd49cdc88855c631dcc8f1b0532f
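Since the CustomBatchNormManualModule record centers on a hand-written backward, a quick gradcheck is a natural sanity test. A minimal sketch, assuming the classes above are in scope; it uses 2-D (n_batch, n_neurons) inputs as the docstring specifies (the backward drops the eps term inside the square root, which should be negligible at gradcheck's default tolerances):

import torch

x = torch.randn(8, 4, dtype=torch.double, requires_grad=True)
gamma = torch.ones(4, dtype=torch.double, requires_grad=True)
beta = torch.zeros(4, dtype=torch.double, requires_grad=True)
# Compares the hand-written backward against finite differences in double precision.
print(torch.autograd.gradcheck(CustomBatchNormManualFunction.apply, (x, gamma, beta)))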
EqualLinearActModule
import torch import torch.nn as nn from copy import deepcopy from functools import partial from torch.nn.init import _calculate_correct_fan def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' 
) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super(EqualizedLRLinearModule, self).__init__(*args, **kwargs) self.with_equlized_lr = equalized_lr_cfg is not None if self.with_equlized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equlized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModule(nn.Module): """Equalized LR Linear Module with Activation Layer. Args: nn ([type]): [description] """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super(EqualLinearActModule, self).__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, x): if x.ndim >= 3: x = x.reshape(x.size(0), -1) x = self.linear(x) if self.with_activation and self.act_type == 'fused_bias': x = self.activate(x, self.bias * self.lr_mul) elif self.bias is not None and self.with_activation: x = self.activate(x + self.bias * self.lr_mul) elif self.bias is not None: x = x + self.bias * self.lr_mul elif self.with_activation: x = self.activate(x) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from copy import deepcopy from functools import partial from torch.nn.init import _calculate_correct_fan assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sqrt_0[grid(16)](primals_2, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 return buf2, buf0, primals_1 def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. 
mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super(EqualizedLRLinearModule, self).__init__(*args, **kwargs) self.with_equlized_lr = equalized_lr_cfg is not None if self.with_equlized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equlized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModuleNew(nn.Module): """Equalized LR Linear Module with Activation Layer. 
Args: nn ([type]): [description] """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super(EqualLinearActModuleNew, self).__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, input_0): primals_3 = self.bias primals_1 = self.linear.weight_orig primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
akimotty877/mmediting
EqualLinearActModule
false
3068
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
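A minimal eager-mode check of the record above, assuming the EqualLinearActModule class and its dependencies defined earlier are in scope (CPU is fine; the fused call path requires CUDA). With gain=1.0, lr_mul=1.0 and fan_in=4, the equalized-lr rescaling factor is 1.0 * sqrt(1 / 4) = 0.5, which is exactly the constant folded into triton_poi_fused_mul_sqrt_0:

import torch

torch.manual_seed(0)
m = EqualLinearActModule(4, 4)              # in_features=4, out_features=4, no activation
x = torch.rand(4, 4)
out = m(x)

# the forward pre-hook rescales weight_orig by gain * sqrt(1 / fan_in) * lr_mul = 0.5
manual = x @ (m.linear.weight_orig * 0.5).t() + m.bias * m.lr_mul
assert torch.allclose(out, manual, atol=1e-6)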
CustomBatchNormAutograd
import torch import torch.nn as nn class CustomBatchNormAutograd(nn.Module): """ This nn.module implements a custom version of the batch norm operation for MLPs. The operations called in self.forward track the history if the input tensors have the flag requires_grad set to True. The backward pass does not need to be implemented, it is dealt with by the automatic differentiation provided by PyTorch. """ def __init__(self, n_neurons, eps=1e-05): """ Initializes CustomBatchNormAutograd object. Args: n_neurons: int specifying the number of neurons eps: small float to be added to the variance for stability """ super(CustomBatchNormAutograd, self).__init__() self.n_neurons = n_neurons self.eps = eps self.gamma = nn.Parameter(torch.ones(n_neurons)) self.beta = nn.Parameter(torch.zeros(n_neurons)) def forward(self, input): """ Compute the batch normalization Args: input: input tensor of shape (n_batch, n_neurons) Returns: out: batch-normalized tensor """ assert input.shape[1] == self.n_neurons, 'Input has incorrect shape' mu = torch.mean(input, 0) var = torch.var(input, dim=0, unbiased=False) x = (input - mu) / (var + self.eps) ** 0.5 out = x * self.gamma + self.beta return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_neurons': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp21 / tmp8 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = tmp10 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp28 + tmp29 tl.store(out_ptr0 + x3, tmp30, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_pow_sub_var_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class CustomBatchNormAutogradNew(nn.Module): """ This nn.module implements a custom version of the batch norm operation for MLPs. The operations called in self.forward track the history if the input tensors have the flag requires_grad set to True. The backward pass does not need to be implemented, it is dealt with by the automatic differentiation provided by PyTorch. """ def __init__(self, n_neurons, eps=1e-05): """ Initializes CustomBatchNormAutograd object. Args: n_neurons: int specifying the number of neurons eps: small float to be added to the variance for stability """ super(CustomBatchNormAutogradNew, self).__init__() self.n_neurons = n_neurons self.eps = eps self.gamma = nn.Parameter(torch.ones(n_neurons)) self.beta = nn.Parameter(torch.zeros(n_neurons)) def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
akashrajkn/sarcastic-gradients
CustomBatchNormAutograd
false
3069
[ "Apache-2.0" ]
0
5a995ab7822dfd49cdc88855c631dcc8f1b0532f
https://github.com/akashrajkn/sarcastic-gradients/tree/5a995ab7822dfd49cdc88855c631dcc8f1b0532f
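A short sanity check of the normalization in eager mode, using the intended 2-D (n_batch, n_neurons) input rather than the 4-D tensor from get_inputs(); with the default gamma=1 and beta=0 the output is centred and approximately unit-variance (the eps term keeps the variance slightly below 1):

import torch

bn = CustomBatchNormAutograd(n_neurons=4)
x = torch.rand(32, 4)                       # (n_batch, n_neurons)
out = bn(x)

assert torch.allclose(out.mean(dim=0), torch.zeros(4), atol=1e-5)
assert torch.allclose(out.var(dim=0, unbiased=False), torch.ones(4), atol=1e-3)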
MaxFeature
import torch import torch.nn as nn class MaxFeature(nn.Module): """Conv2d or Linear layer with max feature selector Generate feature maps with double channels, split them and select the max feature. Args: in_channels (int): Channel number of inputs. out_channels (int): Channel number of outputs. kernel_size (int or tuple): Size of the convolving kernel. stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 1 filter_type (str): Type of filter. Options are 'conv2d' and 'linear'. Default: 'conv2d'. """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, filter_type='conv2d'): super().__init__() self.out_channels = out_channels filter_type = filter_type.lower() if filter_type == 'conv2d': self.filter = nn.Conv2d(in_channels, 2 * out_channels, kernel_size=kernel_size, stride=stride, padding=padding) elif filter_type == 'linear': self.filter = nn.Linear(in_channels, 2 * out_channels) else: raise ValueError( f"'filter_type' should be 'conv2d' or 'linear', but got {filter_type}" ) def forward(self, x): """Forward function. Args: x (Tensor): Input tensor. Returns: Tensor: Forward results. """ x = self.filter(x) out = torch.chunk(x, chunks=2, dim=1) return torch.max(out[0], out[1]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_eq_gt_lt_maximum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 x3 = xindex % 64 x1 = xindex // 16 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 128 * x2), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3 + 128 * x2), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp7 = tmp2 == tmp5 tmp8 = tmp2 > tmp5 tmp9 = tmp2 < tmp5 tl.store(out_ptr0 + x4, tmp6, xmask) tl.store(out_ptr1 + x4, tmp7, xmask) tl.store(out_ptr2 + x4, tmp8, xmask) tl.store(out_ptr3 + x4, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_gt_lt_maximum_0[grid(256)](buf0, primals_2, buf1, buf2, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf1, primals_1, primals_3, buf2, buf3, buf4 class MaxFeatureNew(nn.Module): """Conv2d or Linear layer with max feature selector Generate feature maps with double channels, split them and select the max feature. Args: in_channels (int): Channel number of inputs. out_channels (int): Channel number of outputs. kernel_size (int or tuple): Size of the convolving kernel. stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 1 filter_type (str): Type of filter. Options are 'conv2d' and 'linear'. Default: 'conv2d'. 
""" def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, filter_type='conv2d'): super().__init__() self.out_channels = out_channels filter_type = filter_type.lower() if filter_type == 'conv2d': self.filter = nn.Conv2d(in_channels, 2 * out_channels, kernel_size=kernel_size, stride=stride, padding=padding) elif filter_type == 'linear': self.filter = nn.Linear(in_channels, 2 * out_channels) else: raise ValueError( f"'filter_type' should be 'conv2d' or 'linear', but got {filter_type}" ) def forward(self, input_0): primals_1 = self.filter.weight primals_2 = self.filter.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
akimotty877/mmediting
MaxFeature
false
3070
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
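An eager-mode sketch of the max-feature-map selection: the convolution produces 2 * out_channels maps and the output keeps the element-wise maximum of the two halves (the fused kernel above additionally stores the ==, > and < masks reused by the backward pass):

import torch

mfm = MaxFeature(in_channels=4, out_channels=4)
x = torch.rand(4, 4, 4, 4)
y = mfm(x)

a, b = torch.chunk(mfm.filter(x), chunks=2, dim=1)   # two (4, 4, 4, 4) halves
assert y.shape == (4, 4, 4, 4)
assert torch.allclose(y, torch.max(a, b))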
PlainRefiner
import torch import torch.nn as nn class PlainRefiner(nn.Module): """Simple refiner from Deep Image Matting. Args: conv_channels (int): Number of channels produced by the three main convolutional layer. loss_refine (dict): Config of the loss of the refiner. Default: None. pretrained (str): Name of pretrained model. Default: None. """ def __init__(self, conv_channels=64, pretrained=None): super().__init__() assert pretrained is None, 'pretrained not supported yet' self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3, padding=1) self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels, kernel_size=3, padding=1) self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels, kernel_size=3, padding=1) self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1 ) self.relu = nn.ReLU(inplace=True) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m) def forward(self, x, raw_alpha): """Forward function. Args: x (Tensor): The input feature map of refiner. raw_alpha (Tensor): The raw predicted alpha matte. Returns: Tensor: The refined alpha matte. """ out = self.relu(self.refine_conv1(x)) out = self.relu(self.refine_conv2(out)) out = self.relu(self.refine_conv3(out)) raw_refine = self.refine_pred(out) pred_refine = torch.sigmoid(raw_alpha + raw_refine) return pred_refine def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp6 = tl.sigmoid(tmp5) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (1, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (1,), (1,)) assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(4096)](buf5, primals_7, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 1, 
4, 4), (16, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_convolution_sigmoid_1[grid(256)](primals_10, buf6, primals_9, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_10 del primals_9 return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf7) class PlainRefinerNew(nn.Module): """Simple refiner from Deep Image Matting. Args: conv_channels (int): Number of channels produced by the three main convolutional layer. loss_refine (dict): Config of the loss of the refiner. Default: None. pretrained (str): Name of pretrained model. Default: None. """ def __init__(self, conv_channels=64, pretrained=None): super().__init__() assert pretrained is None, 'pretrained not supported yet' self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3, padding=1) self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels, kernel_size=3, padding=1) self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels, kernel_size=3, padding=1) self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1 ) self.relu = nn.ReLU(inplace=True) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m) def forward(self, input_0, input_1): primals_1 = self.refine_conv1.weight primals_2 = self.refine_conv1.bias primals_4 = self.refine_conv2.weight primals_5 = self.refine_conv2.bias primals_6 = self.refine_conv3.weight primals_7 = self.refine_conv3.bias primals_8 = self.refine_pred.weight primals_9 = self.refine_pred.bias primals_3 = input_0 primals_10 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
akimotty877/mmediting
PlainRefiner
false
3071
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
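A shape-level sketch of the refinement step with illustrative sizes: the network predicts a one-channel residual and the returned matte is sigmoid(raw_alpha + residual), so every value lies strictly in (0, 1). The 4-channel x is commonly the image concatenated with the coarse alpha prediction:

import torch

refiner = PlainRefiner(conv_channels=64)
x = torch.rand(2, 4, 16, 16)
raw_alpha = torch.rand(2, 1, 16, 16)
pred = refiner(x, raw_alpha)

assert pred.shape == (2, 1, 16, 16)
assert pred.min() > 0 and pred.max() < 1     # sigmoid keeps the matte in (0, 1)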
network
import torch import torch.nn as nn import torch.nn.functional as F class network(nn.Module): def __init__(self, state_size, action_size, seed=0): super(network, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, 32) self.fc2 = nn.Linear(32, 32) self.fc3 = nn.Linear(32, action_size) def forward(self, state): """Build a network that maps state -> action values.""" x = self.fc1(state) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf6, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3, primals_5, buf5, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor( buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6 class networkNew(nn.Module): def __init__(self, state_size, action_size, seed=0): super(networkNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, 32) self.fc2 = nn.Linear(32, 32) self.fc3 = nn.Linear(32, action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, 
primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
akashkmr27089/ReinforcementLearning_Udacity_Deep_Reinforcemnt_Learning
network
false
3072
[ "MIT" ]
0
b7dc13b0116898848d8d0b8a95b7af182982bd6b
https://github.com/akashkmr27089/ReinforcementLearning_Udacity_Deep_Reinforcemnt_Learning/tree/b7dc13b0116898848d8d0b8a95b7af182982bd6b
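A usage sketch for this small Q-network; state_size=8 and action_size=4 are illustrative values, not taken from the record. States go in, one action value per action comes out, and the greedy policy is an argmax over the last dimension:

import torch

qnet = network(state_size=8, action_size=4)
states = torch.rand(32, 8)                   # a batch of 32 states
q_values = qnet(states)
greedy = q_values.argmax(dim=1)

assert q_values.shape == (32, 4)
assert greedy.shape == (32,)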
MultiLayeredConv1d
import torch class MultiLayeredConv1d(torch.nn.Module): """Multi-layered conv1d for Transformer block. This is a module of multi-leyered conv1d designed to replace positionwise feed-forward network in Transforner block, which is introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_. Args: in_chans (int): Number of input channels. hidden_chans (int): Number of hidden channels. kernel_size (int): Kernel size of conv1d. dropout_rate (float): Dropout rate. .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`: https://arxiv.org/pdf/1905.09263.pdf """ def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate): super(MultiLayeredConv1d, self).__init__() self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.dropout = torch.nn.Dropout(dropout_rate) def forward(self, x): """Calculate forward propagation. Args: x (Tensor): Batch of input tensors (B, *, in_chans). Returns: Tensor: Batch of output tensors (B, *, hidden_chans) """ x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1) return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 3), (12, 3, 1)) buf1 = reinterpret_tensor(buf0, (4, 3), (3, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 3), (3, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(12)](buf1, primals_3, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 3 ), (0, 3, 1), 0), primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 2), (8, 2, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(8)](buf3, primals_5, 8, XBLOCK= 8, num_warps=1, num_stages=1) del primals_5 return reinterpret_tensor(buf3, (4, 2), (2, 1), 0 ), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4), ( 16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 3), (12, 3, 1), 0), buf4 class MultiLayeredConv1dNew(torch.nn.Module): """Multi-layered conv1d for Transformer block. This is a module of multi-leyered conv1d designed to replace positionwise feed-forward network in Transforner block, which is introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_. Args: in_chans (int): Number of input channels. hidden_chans (int): Number of hidden channels. kernel_size (int): Kernel size of conv1d. dropout_rate (float): Dropout rate. .. 
_`FastSpeech: Fast, Robust and Controllable Text to Speech`: https://arxiv.org/pdf/1905.09263.pdf """ def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate): super(MultiLayeredConv1dNew, self).__init__() self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.dropout = torch.nn.Dropout(dropout_rate) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
akreal/end-to-end-slu-espnet
MultiLayeredConv1d
false
3073
[ "Apache-2.0" ]
0
0b16dc8b10b31a4567b3312678a753a94bb200da
https://github.com/akreal/end-to-end-slu-espnet/tree/0b16dc8b10b31a4567b3312678a753a94bb200da
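An eager-mode sketch with the intended 3-D (batch, time, channels) input and an odd kernel size, where the padding (kernel_size - 1) // 2 preserves the sequence length; the 2-D input and even kernel size in get_inputs() above are a degenerate case that shortens the output, as the (4, 3) and (4, 2) buffers in the compiled code show:

import torch

ff = MultiLayeredConv1d(in_chans=4, hidden_chans=16, kernel_size=3, dropout_rate=0.0)
x = torch.rand(2, 10, 4)                     # (batch, time, in_chans)
y = ff(x)
assert y.shape == (2, 10, 4)                 # time and channel dimensions preserved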
ClassHead
import torch from torch import nn import torch.nn class ClassHead(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(ClassHead, self).__init__() self.num_anchors = num_anchors self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) def forward(self, x): out = self.conv1x1(x) return out def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 24 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 6 y1 = yindex // 6 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 6 * x2 + 24576 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6)) buf2 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_1[grid(24, 4096)](buf1, primals_2, buf2, 24, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf1 del primals_2 return buf2, primals_1, buf0 class ClassHeadNew(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(ClassHeadNew, self).__init__() self.num_anchors = num_anchors self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ZongqingHou/Pytorch_Retinaface
ClassHead
false
3074
[ "MIT" ]
0
6284b7158a0d9d3d4a2cc267a393c21863a1b938
https://github.com/ZongqingHou/Pytorch_Retinaface/tree/6284b7158a0d9d3d4a2cc267a393c21863a1b938
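A shape sketch (the 16x16 feature map is illustrative): the 1x1 convolution emits num_anchors * 2 channels, i.e. a two-way classification score per anchor at every spatial location. Downstream detection code typically permutes and reshapes this to (N, H * W * num_anchors, 2), but this module returns the raw map:

import torch

head = ClassHead(inchannels=512, num_anchors=3)
feat = torch.rand(1, 512, 16, 16)
scores = head(feat)
assert scores.shape == (1, 6, 16, 16)        # 3 anchors * 2 classes per location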
Intensity
import torch import torch.nn as nn class Intensity(nn.Module): def __init__(self, scale): super().__init__() self.scale = scale def forward(self, x): r = torch.randn((x.size(0), 1, 1, 1), device=x.device) noise = 1.0 + self.scale * r.clamp(-2.0, 2.0) return x * noise def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'scale': 1.0}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = -2.0 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = 2.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = tmp7 + tmp6 tmp9 = tmp0 * tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 1, 1, 1], device=device( type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_mul_0[grid(256)](arg0_1, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf1 return buf2, class IntensityNew(nn.Module): def __init__(self, scale): super().__init__() self.scale = scale def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
alcinos/SPR
Intensity
false
3075
[ "MIT" ]
0
dec8df83eeaa25a1d75ecff0cf4ce4bfae9cab4c
https://github.com/alcinos/SPR/tree/dec8df83eeaa25a1d75ecff0cf4ce4bfae9cab4c
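A sketch of the augmentation semantics: each sample in the batch is multiplied by a single scalar 1 + scale * clamp(r, -2, 2) with r ~ N(0, 1), so scale=0 is the identity and for any other scale the ratio out / x is constant within a sample:

import torch

x = torch.rand(4, 3, 8, 8) + 0.1             # keep values away from zero for the ratio check

assert torch.equal(Intensity(scale=0.0)(x), x)

out = Intensity(scale=0.1)(x)
ratio = (out / x).reshape(4, -1)
assert torch.allclose(ratio, ratio[:, :1].expand_as(ratio))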
Entropy
import torch import torch.nn.functional as F import torch.nn as nn class Entropy(nn.Module): def __init__(self): super(Entropy, self).__init__() def forward(self, x): num, ms1, ms2 = x.size() ent_p2g = F.softmax(x, dim=1) * F.log_softmax(x, dim=1) ent_g2p = F.softmax(x, dim=2) * F.log_softmax(x, dim=2) ent_sum = -1.0 * ent_p2g.view(num, -1).sum() - ent_g2p.view(num, -1 ).sum() return ent_sum / (ms1 * ms2) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = tmp0 - tmp16 tmp18 = tl_math.exp(tmp17) tl.store(out_ptr0 + x4, tmp9, xmask) tl.store(out_ptr1 + x4, tmp8, xmask) tl.store(out_ptr2 + x4, tmp18, xmask) tl.store(out_ptr3 + x4, tmp17, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 r4 = rindex // 4 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 16 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + r3, None) tmp10 = tl.load(in_ptr1 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr2 + r3, None) tmp28 = tl.load(in_ptr2 + 4 * r4, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + 4 * r4), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + (2 + 4 * r4), None, eviction_policy='evict_last') tmp33 = tl.load(in_ptr2 + (3 + 4 * r4), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + r3, None) tmp37 = tl.load(in_ptr3 + 4 * r4, None, eviction_policy='evict_last') tmp39 
= tl.load(in_ptr3 + (1 + 4 * r4), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr3 + (2 + 4 * r4), None, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (3 + 4 * r4), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp11 = tl_math.exp(tmp10) tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tl_math.log(tmp20) tmp22 = tmp9 - tmp21 tmp23 = tmp8 * tmp22 tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp34 = tmp32 + tmp33 tmp35 = tmp27 / tmp34 tmp38 = tl_math.exp(tmp37) tmp40 = tl_math.exp(tmp39) tmp41 = tmp38 + tmp40 tmp43 = tl_math.exp(tmp42) tmp44 = tmp41 + tmp43 tmp46 = tl_math.exp(tmp45) tmp47 = tmp44 + tmp46 tmp48 = tl_math.log(tmp47) tmp49 = tmp36 - tmp48 tmp50 = tmp35 * tmp49 tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK]) tmp53 = tl.sum(tmp51, 1)[:, None] tmp54 = -1.0 tmp55 = tmp26 * tmp54 tmp56 = tmp55 - tmp53 tmp57 = 0.0625 tmp58 = tmp56 * tmp57 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp58, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax__softmax_0[grid(64)](arg0_1, buf0, buf1, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf8 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sub_sum_1[grid(1)](buf8, buf0, buf1, buf4, buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf4 del buf5 return buf8, class EntropyNew(nn.Module): def __init__(self): super(EntropyNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
akira-l/online_mmdetection
Entropy
false
3076
[ "Apache-2.0" ]
0
10c60467a57a605b783486b7fbc508776394ea79
https://github.com/akira-l/online_mmdetection/tree/10c60467a57a605b783486b7fbc508776394ea79
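A worked check of the value for uniform logits: every softmax distribution then has entropy ln(4), each of the two terms sums to 4 * ln(4) over the 1x4x4 input, and the result is (4 * ln 4 + 4 * ln 4) / (4 * 4) = ln 2. The compiled kernel above folds the same 1 / (ms1 * ms2) = 0.0625 in as a constant:

import math
import torch

ent = Entropy()
x = torch.zeros(1, 4, 4)                     # uniform logits along both softmax dims
val = ent(x)
assert torch.allclose(val, torch.tensor(math.log(2.0)), atol=1e-6)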
CifarDownsampling
import torch import torch.nn as nn import torch.nn.functional as F class CifarDownsampling(nn.Module): def __init__(self, planes): super(CifarDownsampling, self).__init__() self.planes = planes def forward(self, x): return F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, self.planes // 4, self .planes // 4), 'constant', 0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'planes': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4 % 6 x0 = xindex % 2 x3 = xindex // 24 x5 = xindex // 2 % 12 x6 = xindex tmp0 = -1 + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-16 + 2 * x0 + 8 * x5 + 64 * x3), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + x6, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 2, 2), (24, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(96)](arg0_1, buf0, 96, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class CifarDownsamplingNew(nn.Module): def __init__(self, planes): super(CifarDownsamplingNew, self).__init__() self.planes = planes def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
alechat/PLCiL
CifarDownsampling
false
3077
[ "Apache-2.0" ]
0
f71fe92cb7781097d3320c28601e06add70f64f9
https://github.com/alechat/PLCiL/tree/f71fe92cb7781097d3320c28601e06add70f64f9
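A sketch of this parameter-free shortcut (the zero-padding style used in CIFAR ResNets): the input is spatially subsampled with stride 2 and then planes // 4 zero channels are padded on each side of the channel dimension, matching the (4, 6, 2, 2) buffer allocated in the compiled code above:

import torch

ds = CifarDownsampling(planes=4)
x = torch.rand(4, 4, 4, 4)
y = ds(x)

assert y.shape == (4, 6, 2, 2)
assert torch.equal(y[:, 1:5], x[:, :, ::2, ::2])   # middle channels hold the subsampled input
assert torch.all(y[:, 0] == 0) and torch.all(y[:, 5] == 0)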
ModulatedToRGB
import torch import torch.nn as nn from copy import deepcopy from functools import partial from torch.nn import functional as F from torch.nn.init import _calculate_correct_fan def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module def _make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. 
""" for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super(EqualizedLRLinearModule, self).__init__(*args, **kwargs) self.with_equlized_lr = equalized_lr_cfg is not None if self.with_equlized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equlized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModule(nn.Module): """Equalized LR Linear Module with Activation Layer. Args: nn ([type]): [description] """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super(EqualLinearActModule, self).__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, x): if x.ndim >= 3: x = x.reshape(x.size(0), -1) x = self.linear(x) if self.with_activation and self.act_type == 'fused_bias': x = self.activate(x, self.bias * self.lr_mul) elif self.bias is not None and self.with_activation: x = self.activate(x + self.bias * self.lr_mul) elif self.bias is not None: x = x + self.bias * self.lr_mul elif self.with_activation: x = self.activate(x) return x class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super(Blur, self).__init__() kernel = _make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, x): return upfirdn2d(x, self.kernel, pad=self.pad) class ModulatedConv2d(nn.Module): """Modulated Conv2d in StyleGANv2. Attention: #. ``style_bias`` is provided to check the difference between official TF implementation and other PyTorch implementation. 
In TF, Tero explicitly add the ``1.`` after style code, while unofficial implementation adopts bias initialization with ``1.``. Details can be found in: https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py#L214 https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py#L99 """ def __init__(self, in_channels, out_channels, kernel_size, style_channels, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1], equalized_lr_cfg=dict(mode='fan_in', lr_mul=1.0, gain=1.0), style_mod_cfg=dict(bias_init=1.0), style_bias=0.0, eps=1e-08): super(ModulatedConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.style_channels = style_channels self.demodulate = demodulate assert isinstance(self.kernel_size, int) and (self.kernel_size >= 1 and self.kernel_size % 2 == 1) self.upsample = upsample self.downsample = downsample self.style_bias = style_bias self.eps = eps style_mod_cfg = dict() if style_mod_cfg is None else style_mod_cfg self.style_modulation = EqualLinearActModule(style_channels, in_channels, **style_mod_cfg) lr_mul_ = 1.0 if equalized_lr_cfg is not None: lr_mul_ = equalized_lr_cfg.get('lr_mul', 1.0) self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size).div_(lr_mul_)) if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, (pad0, pad1), upsample_factor=factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) if equalized_lr_cfg is not None: equalized_lr(self, **equalized_lr_cfg) self.padding = kernel_size // 2 def forward(self, x, style): n, c, h, w = x.shape style = self.style_modulation(style).view(n, 1, c, 1, 1 ) + self.style_bias weight = self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(n, self.out_channels, 1, 1, 1) weight = weight.view(n * self.out_channels, c, self.kernel_size, self.kernel_size) if self.upsample: x = x.reshape(1, n * c, h, w) weight = weight.view(n, self.out_channels, c, self.kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(n * c, self. 
out_channels, self.kernel_size, self.kernel_size) x = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=n) x = x.reshape(n, self.out_channels, *x.shape[-2:]) x = self.blur(x) elif self.downsample: x = self.blur(x) x = x.view(1, n * self.in_channels, *x.shape[-2:]) x = F.conv2d(x, weight, stride=2, padding=0, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) else: x = x.view(1, n * c, h, w) x = F.conv2d(x, weight, stride=1, padding=self.padding, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) return x class UpsampleUpFIRDn(nn.Module): def __init__(self, kernel, factor=2): super(UpsampleUpFIRDn, self).__init__() self.factor = factor kernel = _make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, x): out = upfirdn2d(x, self.kernel, up=self.factor, down=1, pad=self.pad) return out class ModulatedToRGB(nn.Module): def __init__(self, in_channels, style_channels, out_channels=3, upsample=True, blur_kernel=[1, 3, 3, 1], style_mod_cfg=dict( bias_init=1.0), style_bias=0.0): super(ModulatedToRGB, self).__init__() if upsample: self.upsample = UpsampleUpFIRDn(blur_kernel) self.conv = ModulatedConv2d(in_channels, out_channels=out_channels, kernel_size=1, style_channels=style_channels, demodulate=False, style_mod_cfg=style_mod_cfg, style_bias=style_bias) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, x, style, skip=None): out = self.conv(x, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'style_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from copy import deepcopy from functools import partial from torch.nn import functional as F from torch.nn.init import _calculate_correct_fan assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_mul_sqrt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = 0.0 tmp7 = tmp5 + tmp6 tmp8 = tmp0 * tmp7 tl.store(out_ptr0 + x4, tmp8, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. float32) get_raw_stream(0) triton_poi_fused_mul_sqrt_0[grid(12)](primals_1, buf0, 12, XBLOCK= 16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sqrt_1[grid(16)](primals_4, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf1, (4, 4), (1, 4 ), 0), out=buf2) buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_add_mul_2[grid(48)](buf0, buf2, primals_5, buf3, 48, XBLOCK=64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return (buf5, buf0, buf1, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_2, (1, 16, 4, 4), (256, 16, 4, 1), 0)) def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module def _make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. 
This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super(EqualizedLRLinearModule, self).__init__(*args, **kwargs) self.with_equlized_lr = equalized_lr_cfg is not None if self.with_equlized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equlized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModule(nn.Module): """Equalized LR Linear Module with Activation Layer. Args: nn ([type]): [description] """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super(EqualLinearActModule, self).__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). 
fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, x): if x.ndim >= 3: x = x.reshape(x.size(0), -1) x = self.linear(x) if self.with_activation and self.act_type == 'fused_bias': x = self.activate(x, self.bias * self.lr_mul) elif self.bias is not None and self.with_activation: x = self.activate(x + self.bias * self.lr_mul) elif self.bias is not None: x = x + self.bias * self.lr_mul elif self.with_activation: x = self.activate(x) return x class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super(Blur, self).__init__() kernel = _make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, x): return upfirdn2d(x, self.kernel, pad=self.pad) class ModulatedConv2d(nn.Module): """Modulated Conv2d in StyleGANv2. Attention: #. ``style_bias`` is provided to check the difference between official TF implementation and other PyTorch implementation. In TF, Tero explicitly add the ``1.`` after style code, while unofficial implementation adopts bias initialization with ``1.``. Details can be found in: https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py#L214 https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py#L99 """ def __init__(self, in_channels, out_channels, kernel_size, style_channels, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1], equalized_lr_cfg=dict(mode='fan_in', lr_mul=1.0, gain=1.0), style_mod_cfg=dict(bias_init=1.0), style_bias=0.0, eps=1e-08): super(ModulatedConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.style_channels = style_channels self.demodulate = demodulate assert isinstance(self.kernel_size, int) and (self.kernel_size >= 1 and self.kernel_size % 2 == 1) self.upsample = upsample self.downsample = downsample self.style_bias = style_bias self.eps = eps style_mod_cfg = dict() if style_mod_cfg is None else style_mod_cfg self.style_modulation = EqualLinearActModule(style_channels, in_channels, **style_mod_cfg) lr_mul_ = 1.0 if equalized_lr_cfg is not None: lr_mul_ = equalized_lr_cfg.get('lr_mul', 1.0) self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size).div_(lr_mul_)) if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, (pad0, pad1), upsample_factor=factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) if equalized_lr_cfg is not None: equalized_lr(self, **equalized_lr_cfg) self.padding = kernel_size // 2 def forward(self, x, style): n, c, h, w = x.shape style = self.style_modulation(style).view(n, 1, c, 1, 1 ) + self.style_bias weight = self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(n, self.out_channels, 1, 1, 1) weight = weight.view(n * self.out_channels, c, self.kernel_size, self.kernel_size) if self.upsample: x = x.reshape(1, n * c, h, w) weight = weight.view(n, 
self.out_channels, c, self.kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(n * c, self. out_channels, self.kernel_size, self.kernel_size) x = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=n) x = x.reshape(n, self.out_channels, *x.shape[-2:]) x = self.blur(x) elif self.downsample: x = self.blur(x) x = x.view(1, n * self.in_channels, *x.shape[-2:]) x = F.conv2d(x, weight, stride=2, padding=0, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) else: x = x.view(1, n * c, h, w) x = F.conv2d(x, weight, stride=1, padding=self.padding, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) return x class UpsampleUpFIRDn(nn.Module): def __init__(self, kernel, factor=2): super(UpsampleUpFIRDn, self).__init__() self.factor = factor kernel = _make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, x): out = upfirdn2d(x, self.kernel, up=self.factor, down=1, pad=self.pad) return out class ModulatedToRGBNew(nn.Module): def __init__(self, in_channels, style_channels, out_channels=3, upsample=True, blur_kernel=[1, 3, 3, 1], style_mod_cfg=dict( bias_init=1.0), style_bias=0.0): super(ModulatedToRGBNew, self).__init__() if upsample: self.upsample = UpsampleUpFIRDn(blur_kernel) self.conv = ModulatedConv2d(in_channels, out_channels=out_channels, kernel_size=1, style_channels=style_channels, demodulate=False, style_mod_cfg=style_mod_cfg, style_bias=style_bias) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_1 = self.conv.weight_orig primals_5 = self.conv.style_modulation.bias primals_3 = self.conv.style_modulation.linear.weight_orig primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
akimotty877/mmediting
ModulatedToRGB
false
3,078
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
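A minimal usage sketch for rows like the one above, assuming the intent is that get_init_inputs() supplies the recorded constructor args/kwargs and get_inputs() supplies the recorded forward-pass tensors (the helper name run_row is hypothetical and not part of the dataset):

import torch

def run_row(module_cls, get_init_inputs, get_inputs):
    # Build the module from the recorded constructor arguments.
    init_args, init_kwargs = get_init_inputs()
    model = module_cls(*init_args, **init_kwargs)
    # Exercise it once on the recorded example inputs.
    with torch.no_grad():
        return model(*get_inputs())

# For this row: ModulatedToRGB(in_channels=4, style_channels=4) applied to a
# (4, 4, 4, 4) feature map and a (4, 4) style code, with skip left as None.
out = run_row(ModulatedToRGB, get_init_inputs, get_inputs)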
MLP
from torch.nn import Module import torch import torch.nn.functional as F from torch.nn.modules.module import Module from scipy.sparse import * class MLP(Module): def __init__(self, features_dim, hidden_dim, out_dim, bias=True, dropout=0.3): super(MLP, self).__init__() self.features_dim = features_dim self.out_dim = out_dim self.dropout = dropout self.linear = torch.nn.Linear(features_dim, hidden_dim) self.z_mean = torch.nn.Linear(hidden_dim, out_dim) self.z_log_std = torch.nn.Linear(hidden_dim, out_dim) def forward(self, input): hidden = F.relu(self.linear(input)) z_mean = F.dropout(self.z_mean(hidden), self.dropout, training=self .training) z_log_std = F.dropout(self.z_log_std(hidden), self.dropout, training=self.training) return z_mean, z_log_std def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features_dim': 4, 'hidden_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch.nn.modules.module import Module from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), primals_6, primals_4, buf4 class MLPNew(Module): def __init__(self, features_dim, hidden_dim, out_dim, bias=True, dropout=0.3): super(MLPNew, self).__init__() self.features_dim = features_dim self.out_dim = out_dim self.dropout = dropout self.linear = torch.nn.Linear(features_dim, hidden_dim) self.z_mean = torch.nn.Linear(hidden_dim, out_dim) self.z_log_std = torch.nn.Linear(hidden_dim, out_dim) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.z_mean.weight primals_5 = self.z_mean.bias primals_6 = self.z_log_std.weight primals_7 = 
self.z_log_std.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
TTomatoZhang/GHGCN
MLP
false
3,079
[ "Apache-2.0" ]
0
09a07ff9e29e5889b912ca5feff74bb9308eda55
https://github.com/TTomatoZhang/GHGCN/tree/09a07ff9e29e5889b912ca5feff74bb9308eda55
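One way to sanity-check that the Triton-backed wrapper in this row preserves the original module's input/output contract (a sketch only, assuming a CUDA device with Triton is available; in eval mode the dropout in MLP is a no-op, so the two variants should agree):

import torch

torch.manual_seed(0)
x = torch.rand([4, 4, 4, 4], device='cuda')
ref = MLP(4, 4, 4).cuda().eval()
opt = MLPNew(4, 4, 4).cuda().eval()
opt.load_state_dict(ref.state_dict())  # both classes register the same parameter names
with torch.no_grad():
    z_mean_ref, z_log_std_ref = ref(x)
    z_mean_opt, z_log_std_opt = opt(x)
print(torch.allclose(z_mean_ref, z_mean_opt, atol=1e-5),
      torch.allclose(z_log_std_ref, z_log_std_opt, atol=1e-5))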
SRCNN
import logging import torch import torch.nn as nn def get_root_logger(log_file=None, log_level=logging.INFO): """Get the root logger. The logger will be initialized if it has not been initialized. By default a StreamHandler will be added. If `log_file` is specified, a FileHandler will also be added. The name of the root logger is the top-level package name, e.g., "mmedit". Args: log_file (str | None): The log filename. If specified, a FileHandler will be added to the root logger. log_level (int): The root logger level. Note that only the process of rank 0 is affected, while other processes will set the level to "Error" and be silent most of the time. Returns: logging.Logger: The root logger. """ logger = get_logger(__name__.split('.')[0], log_file, log_level) return logger class SRCNN(nn.Module): """SRCNN network structure for image super resolution. SRCNN has three conv layers. For each layer, we can define the `in_channels`, `out_channels` and `kernel_size`. The input image will first be upsampled with a bicubic upsampler, and then super-resolved in the HR spatial size. Paper: Learning a Deep Convolutional Network for Image Super-Resolution. Args: channels (tuple[int]): A tuple of channel numbers for each layer including channels of input and output . Default: (3, 64, 32, 3). kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer. Default: (9, 1, 5). upscale_factor (int): Upsampling factor. Default: 4. """ def __init__(self, channels=(3, 64, 32, 3), kernel_sizes=(9, 1, 5), upscale_factor=4): super().__init__() assert len(channels ) == 4, f'The length of channel tuple should be 4, but got {len(channels)}' assert len(kernel_sizes ) == 3, f'The length of kernel tuple should be 3, but got {len(kernel_sizes)}' self.upscale_factor = upscale_factor self.img_upsampler = nn.Upsample(scale_factor=self.upscale_factor, mode='bicubic', align_corners=False) self.conv1 = nn.Conv2d(channels[0], channels[1], kernel_size= kernel_sizes[0], padding=kernel_sizes[0] // 2) self.conv2 = nn.Conv2d(channels[1], channels[2], kernel_size= kernel_sizes[1], padding=kernel_sizes[1] // 2) self.conv3 = nn.Conv2d(channels[2], channels[3], kernel_size= kernel_sizes[2], padding=kernel_sizes[2] // 2) self.relu = nn.ReLU() def forward(self, x): """Forward function. Args: x (Tensor): Input tensor with shape (n, c, h, w). Returns: Tensor: Forward results. """ x = self.img_upsampler(x) out = self.relu(self.conv1(x)) out = self.relu(self.conv2(out)) out = self.conv3(out) return out def init_weights(self, pretrained=None, strict=True): """Init weights for models. Args: pretrained (str, optional): Path for pretrained weights. If given None, pretrained weights will not be loaded. Defaults to None. strict (boo, optional): Whether strictly load the pretrained model. Defaults to True. """ if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=strict, logger=logger) elif pretrained is None: pass else: raise TypeError( f'"pretrained" must be a str or None. But received {type(pretrained)}.' ) def get_inputs(): return [torch.rand([4, 3, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import logging import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0( in_out_ptr1, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.25 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = libdevice.floor(tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 - tmp9 tmp11 = tl.full([1], 0, tl.int64) tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tl.full([1], 3, tl.int64) tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp15 = x0 tmp16 = tmp15.to(tl.float32) tmp17 = tmp16 + tmp2 tmp18 = tmp17 * tmp4 tmp19 = tmp18 - tmp2 tmp20 = libdevice.floor(tmp19) tmp21 = tmp20.to(tl.int32) tmp22 = tmp21 - tmp9 tmp23 = triton_helpers.maximum(tmp22, tmp11) tmp24 = triton_helpers.minimum(tmp23, tmp13) tmp25 = tl.load(in_ptr0 + (tmp24 + 4 * tmp14 + 16 * x2), xmask, eviction_policy='evict_last') tmp26 = tmp19 - tmp20 tmp27 = 0.0 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp30 + tmp29 tmp32 = -0.75 tmp33 = tmp31 * tmp32 tmp34 = -3.75 tmp35 = tmp33 - tmp34 tmp36 = tmp35 * tmp31 tmp37 = -6.0 tmp38 = tmp36 + tmp37 tmp39 = tmp38 * tmp31 tmp40 = -3.0 tmp41 = tmp39 - tmp40 tmp42 = tmp25 * tmp41 tmp43 = triton_helpers.maximum(tmp21, tmp11) tmp44 = triton_helpers.minimum(tmp43, tmp13) tmp45 = tl.load(in_ptr0 + (tmp44 + 4 * tmp14 + 16 * x2), xmask, eviction_policy='evict_last') tmp46 = 1.25 tmp47 = tmp30 * tmp46 tmp48 = 2.25 tmp49 = tmp47 - tmp48 tmp50 = tmp49 * tmp30 tmp51 = tmp50 * tmp30 tmp52 = tmp51 + tmp29 tmp53 = tmp45 * tmp52 tmp54 = tmp21 + tmp9 tmp55 = triton_helpers.maximum(tmp54, tmp11) tmp56 = triton_helpers.minimum(tmp55, tmp13) tmp57 = tl.load(in_ptr0 + (tmp56 + 4 * tmp14 + 16 * x2), xmask, eviction_policy='evict_last') tmp58 = tmp29 - tmp30 tmp59 = tmp58 * tmp46 tmp60 = tmp59 - tmp48 tmp61 = tmp60 * tmp58 tmp62 = tmp61 * tmp58 tmp63 = tmp62 + tmp29 tmp64 = tmp57 * tmp63 tmp65 = triton_helpers.maximum(tmp8, tmp11) tmp66 = triton_helpers.minimum(tmp65, tmp13) tmp67 = tl.load(in_ptr0 + (tmp24 + 4 * tmp66 + 16 * x2), xmask, eviction_policy='evict_last') tmp68 = tmp67 * tmp41 tmp69 = tl.full([1], 2, tl.int64) tmp70 = tmp21 + tmp69 tmp71 = triton_helpers.maximum(tmp70, tmp11) tmp72 = triton_helpers.minimum(tmp71, tmp13) tmp73 = tl.load(in_ptr0 + (tmp72 + 4 * tmp14 + 16 * x2), xmask, eviction_policy='evict_last') tmp74 = 2.0 tmp75 = tmp74 - tmp30 tmp76 = tmp75 * tmp32 tmp77 = tmp76 - tmp34 tmp78 = tmp77 * tmp75 tmp79 = tmp78 + tmp37 tmp80 = tmp79 * tmp75 tmp81 = tmp80 - tmp40 tmp82 = tmp73 * tmp81 tmp83 = tl.load(in_ptr0 + (tmp44 + 4 * tmp66 + 16 * x2), xmask, eviction_policy='evict_last') tmp84 = tmp83 * tmp52 tmp85 = tl.load(in_ptr0 + (tmp56 + 4 * tmp66 + 16 * x2), xmask, eviction_policy='evict_last') tmp86 = tmp85 * tmp63 tmp87 = tmp8 + tmp9 
tmp88 = triton_helpers.maximum(tmp87, tmp11) tmp89 = triton_helpers.minimum(tmp88, tmp13) tmp90 = tl.load(in_ptr0 + (tmp24 + 4 * tmp89 + 16 * x2), xmask, eviction_policy='evict_last') tmp91 = tmp90 * tmp41 tmp92 = tl.load(in_ptr0 + (tmp72 + 4 * tmp66 + 16 * x2), xmask, eviction_policy='evict_last') tmp93 = tmp92 * tmp81 tmp94 = tl.load(in_ptr0 + (tmp44 + 4 * tmp89 + 16 * x2), xmask, eviction_policy='evict_last') tmp95 = tmp94 * tmp52 tmp96 = tl.load(in_ptr0 + (tmp56 + 4 * tmp89 + 16 * x2), xmask, eviction_policy='evict_last') tmp97 = tmp96 * tmp63 tmp98 = tmp8 + tmp69 tmp99 = triton_helpers.maximum(tmp98, tmp11) tmp100 = triton_helpers.minimum(tmp99, tmp13) tmp101 = tl.load(in_ptr0 + (tmp24 + 4 * tmp100 + 16 * x2), xmask, eviction_policy='evict_last') tmp102 = tmp101 * tmp41 tmp103 = tl.load(in_ptr0 + (tmp72 + 4 * tmp89 + 16 * x2), xmask, eviction_policy='evict_last') tmp104 = tmp103 * tmp81 tmp105 = tl.load(in_ptr0 + (tmp44 + 4 * tmp100 + 16 * x2), xmask, eviction_policy='evict_last') tmp106 = tmp105 * tmp52 tmp107 = tl.load(in_ptr0 + (tmp56 + 4 * tmp100 + 16 * x2), xmask, eviction_policy='evict_last') tmp108 = tmp107 * tmp63 tmp109 = tl.load(in_ptr0 + (tmp72 + 4 * tmp100 + 16 * x2), xmask, eviction_policy='evict_last') tmp110 = tmp109 * tmp81 tmp111 = tmp42 + tmp53 tmp112 = tmp111 + tmp64 tmp113 = tmp112 + tmp82 tmp114 = tmp6 - tmp7 tmp115 = triton_helpers.maximum(tmp114, tmp27) tmp116 = triton_helpers.minimum(tmp115, tmp29) tmp117 = tmp116 + tmp29 tmp118 = tmp117 * tmp32 tmp119 = tmp118 - tmp34 tmp120 = tmp119 * tmp117 tmp121 = tmp120 + tmp37 tmp122 = tmp121 * tmp117 tmp123 = tmp122 - tmp40 tmp124 = tmp113 * tmp123 tmp125 = tmp68 + tmp84 tmp126 = tmp125 + tmp86 tmp127 = tmp126 + tmp93 tmp128 = tmp116 * tmp46 tmp129 = tmp128 - tmp48 tmp130 = tmp129 * tmp116 tmp131 = tmp130 * tmp116 tmp132 = tmp131 + tmp29 tmp133 = tmp127 * tmp132 tmp134 = tmp124 + tmp133 tmp135 = tmp91 + tmp95 tmp136 = tmp135 + tmp97 tmp137 = tmp136 + tmp104 tmp138 = tmp29 - tmp116 tmp139 = tmp138 * tmp46 tmp140 = tmp139 - tmp48 tmp141 = tmp140 * tmp138 tmp142 = tmp141 * tmp138 tmp143 = tmp142 + tmp29 tmp144 = tmp137 * tmp143 tmp145 = tmp134 + tmp144 tmp146 = tmp102 + tmp106 tmp147 = tmp146 + tmp108 tmp148 = tmp147 + tmp110 tmp149 = tmp74 - tmp116 tmp150 = tmp149 * tmp32 tmp151 = tmp150 - tmp34 tmp152 = tmp151 * tmp149 tmp153 = tmp152 + tmp37 tmp154 = tmp153 * tmp149 tmp155 = tmp154 - tmp40 tmp156 = tmp148 * tmp155 tmp157 = tmp145 + tmp156 tl.store(in_out_ptr1 + x3, tmp157, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, 
XBLOCK: tl .constexpr): xnumel = 3072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 256 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (64, 3, 9, 9), (243, 81, 9, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (3, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_7, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf10 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch .float32) buf18 = buf10 del buf10 buf20 = buf18 del buf18 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0[ grid(3072)](buf20, primals_1, 3072, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf21 = extern_kernels.convolution(buf20, primals_2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 64, 16, 16), (16384, 256, 16, 1)) buf22 = buf21 del buf21 triton_poi_fused_convolution_relu_1[grid(65536)](buf22, primals_3, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf23 = extern_kernels.convolution(buf22, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 32, 16, 16), (8192, 256, 16, 1)) buf24 = buf23 del buf23 triton_poi_fused_convolution_relu_2[grid(32768)](buf24, primals_5, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf25 = extern_kernels.convolution(buf24, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 3, 16, 16), (768, 256, 16, 1)) buf26 = buf25 del buf25 triton_poi_fused_convolution_3[grid(3072)](buf26, primals_7, 3072, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf26, primals_2, primals_4, primals_6, buf20, buf22, buf24 def get_root_logger(log_file=None, log_level=logging.INFO): """Get the root logger. The logger will be initialized if it has not been initialized. By default a StreamHandler will be added. If `log_file` is specified, a FileHandler will also be added. The name of the root logger is the top-level package name, e.g., "mmedit". Args: log_file (str | None): The log filename. If specified, a FileHandler will be added to the root logger. log_level (int): The root logger level. Note that only the process of rank 0 is affected, while other processes will set the level to "Error" and be silent most of the time. Returns: logging.Logger: The root logger. """ logger = get_logger(__name__.split('.')[0], log_file, log_level) return logger class SRCNNNew(nn.Module): """SRCNN network structure for image super resolution. SRCNN has three conv layers. For each layer, we can define the `in_channels`, `out_channels` and `kernel_size`. The input image will first be upsampled with a bicubic upsampler, and then super-resolved in the HR spatial size. Paper: Learning a Deep Convolutional Network for Image Super-Resolution. 
Args: channels (tuple[int]): A tuple of channel numbers for each layer including channels of input and output . Default: (3, 64, 32, 3). kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer. Default: (9, 1, 5). upscale_factor (int): Upsampling factor. Default: 4. """ def __init__(self, channels=(3, 64, 32, 3), kernel_sizes=(9, 1, 5), upscale_factor=4): super().__init__() assert len(channels ) == 4, f'The length of channel tuple should be 4, but got {len(channels)}' assert len(kernel_sizes ) == 3, f'The length of kernel tuple should be 3, but got {len(kernel_sizes)}' self.upscale_factor = upscale_factor self.img_upsampler = nn.Upsample(scale_factor=self.upscale_factor, mode='bicubic', align_corners=False) self.conv1 = nn.Conv2d(channels[0], channels[1], kernel_size= kernel_sizes[0], padding=kernel_sizes[0] // 2) self.conv2 = nn.Conv2d(channels[1], channels[2], kernel_size= kernel_sizes[1], padding=kernel_sizes[1] // 2) self.conv3 = nn.Conv2d(channels[2], channels[3], kernel_size= kernel_sizes[2], padding=kernel_sizes[2] // 2) self.relu = nn.ReLU() def init_weights(self, pretrained=None, strict=True): """Init weights for models. Args: pretrained (str, optional): Path for pretrained weights. If given None, pretrained weights will not be loaded. Defaults to None. strict (boo, optional): Whether strictly load the pretrained model. Defaults to True. """ if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=strict, logger=logger) elif pretrained is None: pass else: raise TypeError( f'"pretrained" must be a str or None. But received {type(pretrained)}.' ) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
akimotty877/mmediting
SRCNN
false
3,080
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
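Shape bookkeeping for the SRCNN row above, which is also where the hard-coded grid sizes in its compiled call() come from:

# input                      : (4, 3, 4, 4)
# bicubic x4 upsample        -> (4, 3, 16, 16)  = 3072 elements  (grid(3072))
# conv1 9x9, padding 4, ReLU -> (4, 64, 16, 16) = 65536 elements (grid(65536))
# conv2 1x1, padding 0, ReLU -> (4, 32, 16, 16) = 32768 elements (grid(32768))
# conv3 5x5, padding 2       -> (4, 3, 16, 16)  = 3072 elements  (grid(3072))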
IOU_Loss
import torch import torch.nn as nn class IOU_Loss(nn.Module): def __init__(self): super().__init__() def forward(self, y_pred, y): i = y_pred.mul(y) u = y_pred + y - i mean_iou = torch.mean(i.view(i.shape[0], -1).sum(1) / u.view(i. shape[0], -1).sum(1)) iou_loss = 1 - mean_iou return iou_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tmp0 + tmp1 tmp8 = tmp7 - tmp2 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp12, xmask) @triton.jit def triton_per_fused_div_mean_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 / tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 4.0 tmp7 = tmp5 / tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_div_mean_rsub_1[grid(1)](buf3, buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, class IOU_LossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
allen-q/pytorch
IOU_Loss
false
3,081
[ "MIT" ]
0
76947f8d6f0bcee04425ad69f93b9a5e3a5060ae
https://github.com/allen-q/pytorch/tree/76947f8d6f0bcee04425ad69f93b9a5e3a5060ae
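A hand-worked instance of the soft-IoU loss defined above (the numbers are arbitrary and only illustrate the arithmetic):

# single sample with two elements:
#   y_pred = [0.5, 1.0],  y = [1.0, 0.0]
#   i = y_pred * y        = [0.5, 0.0]   -> sum(i) = 0.5
#   u = y_pred + y - i    = [1.0, 1.0]   -> sum(u) = 2.0
#   IoU = 0.5 / 2.0 = 0.25,  iou_loss = 1 - mean(IoU) = 0.75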
GraphVae
from torch.nn import Module import math import torch import torch.nn.functional as F from torch.nn.modules.module import Module from torch.nn.parameter import Parameter from scipy.sparse import * class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=False, act=lambda x: x, dropout=0.0): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.act = act self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) torch.nn.init.xavier_uniform_(self.weight) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): input = F.dropout(input, self.dropout, training=self.training) input = torch.squeeze(input) support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: output = output + self.bias return self.act(output) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GraphVae(Module): def __init__(self, features_dim, hidden_dim, out_dim, bias=False, dropout=0.3): super(GraphVae, self).__init__() self.features_dim = features_dim self.out_dim = out_dim self.dropout = dropout self.gc1 = GraphConvolution(features_dim, hidden_dim, bias=bias, dropout=dropout, act=F.relu) self.gc_mean = GraphConvolution(hidden_dim, out_dim, bias=bias, dropout=dropout) self.gc_log_std = GraphConvolution(hidden_dim, out_dim, bias=bias, dropout=dropout) def forward(self, adj, input): hidden = self.gc1(input, adj) z_mean = self.gc_mean(hidden, adj) z_log_std = self.gc_log_std(hidden, adj) return z_mean, z_log_std def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'features_dim': 4, 'hidden_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import math import torch.nn.functional as F from torch.nn.modules.module import Module from torch.nn.parameter import Parameter from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_squeeze_threshold_backward_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf0 del buf0 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_squeeze_threshold_backward_0[grid(16)](buf1, buf2, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.mm(buf2, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf3 del buf3 extern_kernels.mm(buf2, primals_5, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf5, out=buf6) del buf5 return buf4, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), reinterpret_tensor( primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), buf7, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=False, act=lambda x: x, dropout=0.0): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.act = act self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) torch.nn.init.xavier_uniform_(self.weight) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): input = F.dropout(input, self.dropout, training=self.training) input = torch.squeeze(input) support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: output = output + self.bias return 
self.act(output) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GraphVaeNew(Module): def __init__(self, features_dim, hidden_dim, out_dim, bias=False, dropout=0.3): super(GraphVaeNew, self).__init__() self.features_dim = features_dim self.out_dim = out_dim self.dropout = dropout self.gc1 = GraphConvolution(features_dim, hidden_dim, bias=bias, dropout=dropout, act=F.relu) self.gc_mean = GraphConvolution(hidden_dim, out_dim, bias=bias, dropout=dropout) self.gc_log_std = GraphConvolution(hidden_dim, out_dim, bias=bias, dropout=dropout) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_2 = self.gc_mean.weight primals_3 = self.gc_log_std.weight primals_4 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
TTomatoZhang/GHGCN
GraphVae
false
3,082
[ "Apache-2.0" ]
0
09a07ff9e29e5889b912ca5feff74bb9308eda55
https://github.com/TTomatoZhang/GHGCN/tree/09a07ff9e29e5889b912ca5feff74bb9308eda55
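Each GraphConvolution above computes act(adj @ (x @ W)), and GraphVae stacks one ReLU layer followed by two parallel output layers for the Gaussian parameters. A minimal CPU sketch following this row's get_inputs()/get_init_inputs():

import torch

adj, x = torch.rand(4, 4), torch.rand(4, 4)
model = GraphVae(features_dim=4, hidden_dim=4, out_dim=4)
z_mean, z_log_std = model(adj, x)  # forward takes (adj, input); both outputs are (4, 4)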
Policy
import torch import torch.nn as nn import torch.nn.functional as F class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False) self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4) self.size = 9 * 9 * 16 self.fc1 = nn.Linear(self.size, 256) self.fc2 = nn.Linear(256, 1) self.sig = nn.Sigmoid() def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = x.view(-1, self.size) x = F.relu(self.fc1(x)) return self.sig(self.fc2(x)) def get_inputs(): return [torch.rand([4, 2, 81, 81])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 23104 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 5184 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 81 % 16 x2 = xindex // 1296 x3 = xindex % 1296 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1312 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1408 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 2, 6, 6), (72, 36, 6, 1)) assert_size_stride(primals_2, (4, 2, 81, 81), (13122, 6561, 81, 1)) assert_size_stride(primals_3, (16, 4, 6, 6), (144, 36, 6, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (256, 1296), (1296, 1)) assert_size_stride(primals_6, (256,), (1,)) assert_size_stride(primals_7, (1, 256), (256, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 38, 38), (5776, 1444, 38, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(23104)](buf1, 23104, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 9, 9), (1296, 81, 9, 1)) buf3 = empty_strided_cuda((4, 16, 9, 9), (1312, 81, 9, 1), torch. float32) buf8 = empty_strided_cuda((4, 16, 9, 9), (1408, 81, 9, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(5184)](buf2 , primals_4, buf3, buf8, 5184, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del primals_4 buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (4, 1296), (1312, 1), 0), reinterpret_tensor(primals_5, (1296, 256), (1, 1296), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(1024)](buf5, primals_6, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_sigmoid_3[grid(4)](buf7, primals_8, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_8 return buf7, primals_1, primals_2, primals_3, buf1, reinterpret_tensor(buf3 , (4, 1296), (1312, 1), 0), buf5, buf7, primals_7, primals_5, buf8 class PolicyNew(nn.Module): def __init__(self): super(PolicyNew, self).__init__() self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False) self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4) self.size = 9 * 9 * 16 self.fc1 = nn.Linear(self.size, 256) self.fc2 = nn.Linear(256, 1) self.sig = nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv2.bias primals_5 = self.fc1.weight primals_6 = self.fc1.bias primals_7 = self.fc2.weight primals_8 = self.fc2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
akashkmr27089/ReinforcementLearning_Udacity_Deep_Reinforcemnt_Learning
Policy
false
3,083
[ "MIT" ]
0
b7dc13b0116898848d8d0b8a95b7af182982bd6b
https://github.com/akashkmr27089/ReinforcementLearning_Udacity_Deep_Reinforcemnt_Learning/tree/b7dc13b0116898848d8d0b8a95b7af182982bd6b
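The hard-coded self.size in the Policy above follows from standard convolution arithmetic on the 81x81 input (the same sizes appear in the stride assertions of the compiled call()):

# conv1: kernel 6, stride 2 -> floor((81 - 6) / 2) + 1 = 38
# conv2: kernel 6, stride 4 -> floor((38 - 6) / 4) + 1 = 9
# flattened features: 9 * 9 * 16 = 1296, matching fc1 = nn.Linear(1296, 256)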
Ranking
import torch class Ranking(torch.nn.Module): def __init__(self, delta, use_cosine_similarity): super(Ranking, self).__init__() self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1) self.measure_similarity = self._get_similarity_function( use_cosine_similarity) self.delta = delta self.criterion = torch.nn.MSELoss(reduction='sum') if not use_cosine_similarity: dim = 64 self.projector = torch.nn.Linear(dim, dim, bias=False) def _get_similarity_function(self, use_cosine_similarity): if use_cosine_similarity: self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1) return self._cosine_simililarity else: return self._metrics_similarity def _metrics_similarity(self, x, y): return torch.sum(torch.square(self.projector(x) - self.projector(y) ), dim=1) def _cosine_simililarity(self, x, y): v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0)) return v def forward(self, zis, zjs, z_anchor): """ :param zis: similar to anchor :param zjs: dissimilar to anchor :param z_anchor: anchor image :return: """ s1 = self.measure_similarity(zis, z_anchor) s2 = self.measure_similarity(zjs, z_anchor) margin = torch.clamp(s2 - s1 + self.delta, min=0, max=1.0) loss = self.criterion(margin, torch.zeros_like(margin)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'delta': 4, 'use_cosine_similarity': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 256 x4 = xindex % 64 x1 = xindex // 4 % 16 x5 = xindex % 256 x6 = xindex // 4 % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (4 * x1 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1 + 64 * x3), xmask, eviction_policy ='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1 + 64 * x3), xmask, eviction_policy ='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1 + 64 * x3), xmask, eviction_policy ='evict_last') tmp16 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + 4 * x6, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (1 + 4 * x6), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (2 + 4 * x6), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + (3 + 4 * x6), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr2 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr2 + (4 * x1 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp35 = tl.load(in_ptr2 + (1 + 4 * x1 + 64 * x3), xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr2 + (2 + 4 * x1 + 64 * x3), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr2 + (3 + 4 * x1 + 64 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tmp34 = tmp33 * tmp33 tmp36 = tmp35 * tmp35 tmp37 = tmp34 + tmp36 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp42 = tmp41 * tmp41 tmp43 = tmp40 + tmp42 tmp44 = libdevice.sqrt(tmp43) tmp45 = triton_helpers.maximum(tmp44, tmp13) tmp46 = tmp32 / tmp45 tmp47 = tmp46 * tmp30 tl.store(out_ptr0 + x7, tmp31, xmask) tl.store(out_ptr1 + x7, tmp47, xmask) @triton.jit def triton_per_fused_add_clamp_mse_loss_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * r0, 
None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = tmp6 - tmp13 tmp15 = 4.0 tmp16 = tmp14 + tmp15 tmp17 = 0.0 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = 1.0 tmp20 = triton_helpers.minimum(tmp18, tmp19) tmp21 = tmp20 * tmp20 tmp22 = tl.broadcast_to(tmp21, [RBLOCK]) tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(1024)]( arg2_1, arg1_1, arg0_1, buf0, buf1, 1024, XBLOCK=128, num_warps =4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf2 = empty_strided_cuda((), (), torch.float32) triton_per_fused_add_clamp_mse_loss_sub_sum_1[grid(1)](buf0, buf1, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf1 return buf2, class RankingNew(torch.nn.Module): def __init__(self, delta, use_cosine_similarity): super(RankingNew, self).__init__() self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1) self.measure_similarity = self._get_similarity_function( use_cosine_similarity) self.delta = delta self.criterion = torch.nn.MSELoss(reduction='sum') if not use_cosine_similarity: dim = 64 self.projector = torch.nn.Linear(dim, dim, bias=False) def _get_similarity_function(self, use_cosine_similarity): if use_cosine_similarity: self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1) return self._cosine_simililarity else: return self._metrics_similarity def _metrics_similarity(self, x, y): return torch.sum(torch.square(self.projector(x) - self.projector(y) ), dim=1) def _cosine_simililarity(self, x, y): v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0)) return v def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
alexcapstick/minder_utils
Ranking
false
3,084
[ "MIT" ]
0
3bb9380b7796b5dd5b995ce1839ea6a94321021d
https://github.com/alexcapstick/minder_utils/tree/3bb9380b7796b5dd5b995ce1839ea6a94321021d
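A minimal sketch of how this record's reference Ranking module can be exercised on CPU, assuming the class and the get_inputs/get_init_inputs helpers from the record above are in scope (the compiled RankingNew path additionally requires a CUDA device, since its call() launches the Triton kernels):

init_args, init_kwargs = get_init_inputs()   # ([], {'delta': 4, 'use_cosine_similarity': 4})
model = Ranking(*init_args, **init_kwargs)   # truthy flag selects the cosine-similarity branch
zis, zjs, z_anchor = get_inputs()            # three (4, 4, 4, 4) tensors
loss = model(zis, zjs, z_anchor)             # scalar: sum of squared clamped margins
print(loss.item())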
outconv
import torch import torch.nn as nn class outconv(nn.Module): def __init__(self, in_ch, out_ch): super(outconv, self).__init__() self.conv = nn.Conv2d(in_ch, out_ch, 1) self.sig = nn.Sigmoid() def forward(self, x): x_conv = self.conv(x) x = self.sig(x_conv) return x[:, :, :101, :101].squeeze() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_sigmoid_sigmoid_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = tmp3 * tmp5 tl.store(in_out_ptr0 + x3, tmp3, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_sigmoid_sigmoid_backward_0[grid(256)](buf1 , primals_2, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf2 class outconvNew(nn.Module): def __init__(self, in_ch, out_ch): super(outconvNew, self).__init__() self.conv = nn.Conv2d(in_ch, out_ch, 1) self.sig = nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
allen-q/pytorch
outconv
false
3,085
[ "MIT" ]
0
76947f8d6f0bcee04425ad69f93b9a5e3a5060ae
https://github.com/allen-q/pytorch/tree/76947f8d6f0bcee04425ad69f93b9a5e3a5060ae
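For the outconv record, a small usage sketch under the same assumption that the reference class and helpers above are in scope; with the 4x4 test input the :101 slices are no-ops and squeeze() removes nothing:

init_args, init_kwargs = get_init_inputs()   # ([], {'in_ch': 4, 'out_ch': 4})
model = outconv(*init_args, **init_kwargs)
out = model(*get_inputs())                   # 1x1 conv followed by sigmoid
print(out.shape)                             # torch.Size([4, 4, 4, 4])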
DurationPredictorLoss
import torch class DurationPredictorLoss(torch.nn.Module): """Loss function module for duration predictor. The loss value is Calculated in log domain to make it Gaussian. Args: offset (float, optional): Offset value to avoid nan in log domain. """ def __init__(self, offset=1.0): super(DurationPredictorLoss, self).__init__() self.criterion = torch.nn.MSELoss() self.offset = offset def forward(self, outputs, targets): """Calculate forward propagation. Args: outputs (Tensor): Batch of prediction durations in log domain (B, T) targets (LongTensor): Batch of groundtruth durations in linear domain (B, T) Returns: Tensor: Mean squared error loss value. Note: `outputs` is in log domain but `targets` is in linear domain. """ targets = torch.log(targets.float() + self.offset) loss = self.criterion(outputs, targets) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = tmp0 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class DurationPredictorLossNew(torch.nn.Module): """Loss function module for duration predictor. The loss value is Calculated in log domain to make it Gaussian. Args: offset (float, optional): Offset value to avoid nan in log domain. """ def __init__(self, offset=1.0): super(DurationPredictorLossNew, self).__init__() self.criterion = torch.nn.MSELoss() self.offset = offset def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
akreal/end-to-end-slu-espnet
DurationPredictorLoss
false
3,086
[ "Apache-2.0" ]
0
0b16dc8b10b31a4567b3312678a753a94bb200da
https://github.com/akreal/end-to-end-slu-espnet/tree/0b16dc8b10b31a4567b3312678a753a94bb200da
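The DurationPredictorLoss docstring above notes that predictions are compared in the log domain; a quick CPU check, assuming the record's class and helpers are in scope:

criterion = DurationPredictorLoss()          # offset defaults to 1.0, matching get_init_inputs()
outputs, targets = get_inputs()              # predictions (log domain), targets (linear domain)
loss = criterion(outputs, targets)           # MSE between outputs and log(targets + offset)
print(loss.item())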
RankingLoss
import torch import torch.nn as nn class RankingLoss(nn.Module): """ ref: https://arxiv.org/abs/2002.10857 """ def __init__(self, m: 'float', gamma: 'float') ->None: super(RankingLoss, self).__init__() self.m = m self.gamma = gamma self.soft_plus = nn.Softplus() def forward(self, y_pred, y_true): y_pred = (1 - 2 * y_true) * y_pred y_pred_neg = y_pred - y_true * 1000000000000.0 y_pred_pos = y_pred - (1 - y_true) * 1000000000000.0 torch.clamp_min(y_pred_pos.detach() + 1 + self.m, min=0.0) torch.clamp_min(y_pred_neg.detach() + self.m, min=0.0) logit_p = y_pred_pos * self.gamma logit_n = (y_pred_neg - self.m) * self.gamma loss = self.soft_plus(torch.logsumexp(logit_n, dim=-1) + torch. logsumexp(logit_p, dim=-1)) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'m': 4, 'gamma': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_logsumexp_mean_mul_rsub_softplus_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp6 = tmp4 * tmp5 tmp7 = 1000000000000.0 tmp8 = tmp0 * tmp7 tmp9 = tmp6 - tmp8 tmp10 = 4.0 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp10 tmp14 = tmp13 * tmp1 tmp15 = tmp3 - tmp14 tmp17 = tmp15 * tmp16 tmp18 = tmp13 * tmp7 tmp19 = tmp17 - tmp18 tmp20 = tmp19 - tmp10 tmp21 = tmp20 * tmp10 tmp22 = triton_helpers.maximum(tmp12, tmp21) tmp24 = tmp23 * tmp1 tmp25 = tmp3 - tmp24 tmp27 = tmp25 * tmp26 tmp28 = tmp23 * tmp7 tmp29 = tmp27 - tmp28 tmp30 = tmp29 - tmp10 tmp31 = tmp30 * tmp10 tmp32 = triton_helpers.maximum(tmp22, tmp31) tmp34 = tmp33 * tmp1 tmp35 = tmp3 - tmp34 tmp37 = tmp35 * tmp36 tmp38 = tmp33 * tmp7 tmp39 = tmp37 - tmp38 tmp40 = tmp39 - tmp10 tmp41 = tmp40 * tmp10 tmp42 = triton_helpers.maximum(tmp32, tmp41) tmp43 = tl_math.abs(tmp42) tmp44 = float('inf') tmp45 = tmp43 == tmp44 tmp46 = 0.0 tmp47 = tl.where(tmp45, tmp46, tmp42) tmp48 = tmp12 - tmp47 tmp49 = tl_math.exp(tmp48) tmp50 = tmp21 - tmp47 tmp51 = tl_math.exp(tmp50) tmp52 = tmp49 + tmp51 tmp53 = tmp31 - tmp47 tmp54 = tl_math.exp(tmp53) tmp55 = tmp52 + tmp54 tmp56 = tmp41 - tmp47 tmp57 = tl_math.exp(tmp56) tmp58 = tmp55 + tmp57 tmp59 = tmp3 - tmp0 tmp60 = tmp59 * tmp7 tmp61 = tmp6 - tmp60 tmp62 = tmp61 * tmp10 tmp63 = tmp3 - tmp13 tmp64 = tmp63 * tmp7 tmp65 = tmp17 - tmp64 tmp66 = tmp65 * tmp10 tmp67 = triton_helpers.maximum(tmp62, tmp66) tmp68 = tmp3 - tmp23 tmp69 = tmp68 * tmp7 tmp70 = tmp27 - tmp69 tmp71 = tmp70 * tmp10 tmp72 = triton_helpers.maximum(tmp67, tmp71) tmp73 = tmp3 - tmp33 tmp74 = tmp73 * tmp7 tmp75 = tmp37 - tmp74 tmp76 = tmp75 * tmp10 tmp77 = triton_helpers.maximum(tmp72, tmp76) tmp78 = tl_math.abs(tmp77) tmp79 = tmp78 == tmp44 tmp80 = tl.where(tmp79, tmp46, tmp77) tmp81 = tmp62 - tmp80 tmp82 = tl_math.exp(tmp81) tmp83 = tmp66 - tmp80 tmp84 = tl_math.exp(tmp83) tmp85 = tmp82 + tmp84 tmp86 = tmp71 - tmp80 tmp87 = tl_math.exp(tmp86) tmp88 = tmp85 + tmp87 tmp89 = tmp76 - tmp80 tmp90 = tl_math.exp(tmp89) tmp91 = tmp88 + tmp90 tmp92 = tl_math.log(tmp58) tmp93 = tmp92 + tmp47 tmp94 = tl_math.log(tmp91) tmp95 = tmp94 + tmp80 tmp96 = tmp93 + tmp95 tmp97 
= tmp96 * tmp3 tmp98 = 20.0 tmp99 = tmp97 > tmp98 tmp100 = tl_math.exp(tmp97) tmp101 = libdevice.log1p(tmp100) tmp102 = tmp101 * tmp3 tmp103 = tl.where(tmp99, tmp96, tmp102) tmp104 = tl.broadcast_to(tmp103, [XBLOCK, RBLOCK]) tmp106 = tl.sum(tmp104, 1)[:, None] tmp107 = 64.0 tmp108 = tmp106 / tmp107 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp108, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 get_raw_stream(0) triton_per_fused_add_logsumexp_mean_mul_rsub_softplus_sub_0[grid(1)]( buf5, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf5, class RankingLossNew(nn.Module): """ ref: https://arxiv.org/abs/2002.10857 """ def __init__(self, m: 'float', gamma: 'float') ->None: super(RankingLossNew, self).__init__() self.m = m self.gamma = gamma self.soft_plus = nn.Softplus() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
alipay/Parameter_Inference_Efficient_PIE
RankingLoss
false
3,087
[ "Apache-2.0" ]
0
660add7705432a526aa3335fff3d8cf1c7d015a4
https://github.com/alipay/Parameter_Inference_Efficient_PIE/tree/660add7705432a526aa3335fff3d8cf1c7d015a4
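A hedged sketch for the RankingLoss record (the circle-loss-style formulation referenced in its docstring), again assuming the reference class and helpers above are in scope; the compiled RankingLossNew path needs a CUDA device:

loss_fn = RankingLoss(m=4, gamma=4)          # matches get_init_inputs()
y_pred, y_true = get_inputs()                # two (4, 4, 4, 4) tensors
loss = loss_fn(y_pred, y_true)               # mean of softplus(logsumexp(neg) + logsumexp(pos))
print(loss.item())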
CNN
import torch import torch.nn as nn import torch.nn.functional as F class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = x.view(-1, 1, 28, 28) x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 1, 28, 28])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 23040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 576 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 5760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x3 = xindex // 12 x2 = xindex // 1440 x4 = xindex % 1440 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x3), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x4 + 1536 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 5120 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 
= tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp18, xmask) tl.store(out_ptr2 + x2, tmp20, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (10, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_3, (10,), (1,)) assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (50, 320), (320, 1)) assert_size_stride(primals_7, (50,), (1,)) assert_size_stride(primals_8, (10, 50), (50, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 24, 24), (5760, 576, 24, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(23040)](buf1, primals_3, 23040, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 10, 12, 12), (1536, 144, 12, 1), torch.int8) buf3 = empty_strided_cuda((4, 10, 12, 12), (1440, 144, 12, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_1[grid(5760)](buf1, buf2, buf3, 5760, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 20, 8, 8), (1280, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(5120)](buf5, primals_5, 5120, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.int8) buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32 ) buf14 = 
empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid (1280)](buf5, buf6, buf7, buf14, 1280, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 320), (320, 1), 0), reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(200)](buf9, primals_7, 200, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf10) del primals_9 buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__softmax_5[grid(4)](buf10, buf13, 4, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf10 return (buf13, primals_2, primals_4, primals_1, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 320), (320, 1), 0), buf9, buf13, primals_8, primals_6, buf14) class CNNNew(nn.Module): def __init__(self): super(CNNNew, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
alexandergg/Deep-Learning-with-Pytorch-and-Azure
CNN
false
3,088
[ "MIT" ]
0
8999ce815469ecaf9fb61998372a6e7507c15943
https://github.com/alexandergg/Deep-Learning-with-Pytorch-and-Azure/tree/8999ce815469ecaf9fb61998372a6e7507c15943
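A minimal CPU sketch for the CNN record, assuming the class and helpers above are in scope; eval() is used so the dropout layers are inactive and the softmax outputs are reproducible:

model = CNN()
model.eval()                                 # disables Dropout2d and F.dropout in forward
x, = get_inputs()                            # (4, 1, 28, 28)
probs = model(x)                             # softmax over 10 classes
print(probs.shape)                           # torch.Size([4, 10])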
NetVLAD
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from sklearn.neighbors import NearestNeighbors class NetVLAD(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=64, dim=128, normalize_input=True, vladv2=False): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors alpha : float Parameter of initialization. Larger value is harder assignment. normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. vladv2 : bool If true, use vladv2 otherwise use vladv1 """ super(NetVLAD, self).__init__() self.num_clusters = num_clusters self.dim = dim self.alpha = 0 self.vladv2 = vladv2 self.normalize_input = normalize_input self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias= vladv2) self.centroids = nn.Parameter(torch.rand(num_clusters, dim)) def init_params(self, clsts, traindescs): if self.vladv2 is False: clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True) dots = np.dot(clstsAssign, traindescs.T) dots.sort(0) dots = dots[::-1, :] self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :]) ).item() self.centroids = nn.Parameter(torch.from_numpy(clsts)) self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha * clstsAssign).unsqueeze(2).unsqueeze(3)) self.conv.bias = None else: knn = NearestNeighbors(n_jobs=-1) knn.fit(traindescs) del traindescs dsSq = np.square(knn.kneighbors(clsts, 2)[1]) del knn self.alpha = (-np.log(0.01) / np.mean(dsSq[:, 1] - dsSq[:, 0]) ).item() self.centroids = nn.Parameter(torch.from_numpy(clsts)) del clsts, dsSq self.conv.weight = nn.Parameter((2.0 * self.alpha * self. centroids).unsqueeze(-1).unsqueeze(-1)) self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm (dim=1)) def forward(self, x): N, C = x.shape[:2] if self.normalize_input: x = F.normalize(x, p=2, dim=1) soft_assign = self.conv(x).view(N, self.num_clusters, -1) soft_assign = F.softmax(soft_assign, dim=1) x_flatten = x.view(N, C, -1) vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype, layout =x.layout, device=x.device) for C in range(self.num_clusters): residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3 ) - self.centroids[C:C + 1, :].expand(x_flatten.size(-1), - 1, -1).permute(1, 2, 0).unsqueeze(0) residual *= soft_assign[:, C:C + 1, :].unsqueeze(2) vlad[:, C:C + 1, :] = residual.sum(dim=-1) vlad = F.normalize(vlad, p=2, dim=2) vlad = vlad.view(x.size(0), -1) vlad = F.normalize(vlad, p=2, dim=1) return vlad def get_inputs(): return [torch.rand([4, 128, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn from sklearn.neighbors import NearestNeighbors assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = xindex // 4096 _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61, out_ptr62, out_ptr63, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 524288 x1 = xindex // 4096 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + 
(1792 + x1), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last') tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last') tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last') tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last') tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last') tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last') tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last') tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last') tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last') tmp84 = tl.load(in_ptr2 + (5120 + x1), None, eviction_policy='evict_last') tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last') tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last') tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last') tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last') tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last') tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last') tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last') tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last') tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last') tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last') tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last') tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last') tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last') tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last') tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last') tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last') tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last') tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last') tmp126 = tl.load(in_ptr2 + (7808 + x1), 
None, eviction_policy='evict_last') tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last') tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tmp7 = tmp5 - tmp6 tmp9 = tmp5 - tmp8 tmp11 = tmp5 - tmp10 tmp13 = tmp5 - tmp12 tmp15 = tmp5 - tmp14 tmp17 = tmp5 - tmp16 tmp19 = tmp5 - tmp18 tmp21 = tmp5 - tmp20 tmp23 = tmp5 - tmp22 tmp25 = tmp5 - tmp24 tmp27 = tmp5 - tmp26 tmp29 = tmp5 - tmp28 tmp31 = tmp5 - tmp30 tmp33 = tmp5 - tmp32 tmp35 = tmp5 - tmp34 tmp37 = tmp5 - tmp36 tmp39 = tmp5 - tmp38 tmp41 = tmp5 - tmp40 tmp43 = tmp5 - tmp42 tmp45 = tmp5 - tmp44 tmp47 = tmp5 - tmp46 tmp49 = tmp5 - tmp48 tmp51 = tmp5 - tmp50 tmp53 = tmp5 - tmp52 tmp55 = tmp5 - tmp54 tmp57 = tmp5 - tmp56 tmp59 = tmp5 - tmp58 tmp61 = tmp5 - tmp60 tmp63 = tmp5 - tmp62 tmp65 = tmp5 - tmp64 tmp67 = tmp5 - tmp66 tmp69 = tmp5 - tmp68 tmp71 = tmp5 - tmp70 tmp73 = tmp5 - tmp72 tmp75 = tmp5 - tmp74 tmp77 = tmp5 - tmp76 tmp79 = tmp5 - tmp78 tmp81 = tmp5 - tmp80 tmp83 = tmp5 - tmp82 tmp85 = tmp5 - tmp84 tmp87 = tmp5 - tmp86 tmp89 = tmp5 - tmp88 tmp91 = tmp5 - tmp90 tmp93 = tmp5 - tmp92 tmp95 = tmp5 - tmp94 tmp97 = tmp5 - tmp96 tmp99 = tmp5 - tmp98 tmp101 = tmp5 - tmp100 tmp103 = tmp5 - tmp102 tmp105 = tmp5 - tmp104 tmp107 = tmp5 - tmp106 tmp109 = tmp5 - tmp108 tmp111 = tmp5 - tmp110 tmp113 = tmp5 - tmp112 tmp115 = tmp5 - tmp114 tmp117 = tmp5 - tmp116 tmp119 = tmp5 - tmp118 tmp121 = tmp5 - tmp120 tmp123 = tmp5 - tmp122 tmp125 = tmp5 - tmp124 tmp127 = tmp5 - tmp126 tmp129 = tmp5 - tmp128 tmp131 = tmp5 - tmp130 tl.store(out_ptr0 + x3, tmp5, None) tl.store(out_ptr1 + x3, tmp7, None) tl.store(out_ptr2 + x3, tmp9, None) tl.store(out_ptr3 + x3, tmp11, None) tl.store(out_ptr4 + x3, tmp13, None) tl.store(out_ptr5 + x3, tmp15, None) tl.store(out_ptr6 + x3, tmp17, None) tl.store(out_ptr7 + x3, tmp19, None) tl.store(out_ptr8 + x3, tmp21, None) tl.store(out_ptr9 + x3, tmp23, None) tl.store(out_ptr10 + x3, tmp25, None) tl.store(out_ptr11 + x3, tmp27, None) tl.store(out_ptr12 + x3, tmp29, None) tl.store(out_ptr13 + x3, tmp31, None) tl.store(out_ptr14 + x3, tmp33, None) tl.store(out_ptr15 + x3, tmp35, None) tl.store(out_ptr16 + x3, tmp37, None) tl.store(out_ptr17 + x3, tmp39, None) tl.store(out_ptr18 + x3, tmp41, None) tl.store(out_ptr19 + x3, tmp43, None) tl.store(out_ptr20 + x3, tmp45, None) tl.store(out_ptr21 + x3, tmp47, None) tl.store(out_ptr22 + x3, tmp49, None) tl.store(out_ptr23 + x3, tmp51, None) tl.store(out_ptr24 + x3, tmp53, None) tl.store(out_ptr25 + x3, tmp55, None) tl.store(out_ptr26 + x3, tmp57, None) tl.store(out_ptr27 + x3, tmp59, None) tl.store(out_ptr28 + x3, tmp61, None) tl.store(out_ptr29 + x3, tmp63, None) tl.store(out_ptr30 + x3, tmp65, None) tl.store(out_ptr31 + x3, tmp67, None) tl.store(out_ptr32 + x3, tmp69, None) tl.store(out_ptr33 + x3, tmp71, None) tl.store(out_ptr34 + x3, tmp73, None) tl.store(out_ptr35 + x3, tmp75, None) tl.store(out_ptr36 + x3, tmp77, None) tl.store(out_ptr37 + x3, tmp79, None) tl.store(out_ptr38 + x3, tmp81, None) tl.store(out_ptr39 + x3, tmp83, None) tl.store(out_ptr40 + x3, tmp85, None) tl.store(out_ptr41 + x3, tmp87, None) tl.store(out_ptr42 + x3, tmp89, None) tl.store(out_ptr43 + x3, tmp91, None) tl.store(out_ptr44 + x3, tmp93, None) tl.store(out_ptr45 + x3, tmp95, None) tl.store(out_ptr46 + x3, tmp97, None) tl.store(out_ptr47 + x3, tmp99, None) tl.store(out_ptr48 + x3, tmp101, None) tl.store(out_ptr49 + x3, tmp103, None) tl.store(out_ptr50 + x3, 
tmp105, None) tl.store(out_ptr51 + x3, tmp107, None) tl.store(out_ptr52 + x3, tmp109, None) tl.store(out_ptr53 + x3, tmp111, None) tl.store(out_ptr54 + x3, tmp113, None) tl.store(out_ptr55 + x3, tmp115, None) tl.store(out_ptr56 + x3, tmp117, None) tl.store(out_ptr57 + x3, tmp119, None) tl.store(out_ptr58 + x3, tmp121, None) tl.store(out_ptr59 + x3, tmp123, None) tl.store(out_ptr60 + x3, tmp125, None) tl.store(out_ptr61 + x3, tmp127, None) tl.store(out_ptr62 + x3, tmp129, None) tl.store(out_ptr63 + x3, tmp131, None) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = xindex // 4096 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 262144 * x1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x3, tmp3, None) tl.store(out_ptr1 + x3, tmp8, None) @triton.jit def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK: tl. 
constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 128 tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') x1 = xindex // 128 _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp3 = tl.load(in_ptr2 + (r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp14 = tl.load(in_ptr2 + (4096 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp23 = tl.load(in_ptr2 + (8192 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp32 = tl.load(in_ptr2 + (12288 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp40 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp41 = tl.load(in_ptr2 + (16384 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp49 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp50 = tl.load(in_ptr2 + (20480 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp58 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp59 = tl.load(in_ptr2 + (24576 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', 
other=0.0) tmp67 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp68 = tl.load(in_ptr2 + (28672 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp77 = tl.load(in_ptr2 + (32768 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp86 = tl.load(in_ptr2 + (36864 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp95 = tl.load(in_ptr2 + (40960 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp103 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp104 = tl.load(in_ptr2 + (45056 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp112 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp113 = tl.load(in_ptr2 + (49152 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp121 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp122 = tl.load(in_ptr2 + (53248 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp130 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp131 = tl.load(in_ptr2 + (57344 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp139 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp140 = tl.load(in_ptr2 + (61440 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp148 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp149 = tl.load(in_ptr2 + (65536 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp157 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp158 = tl.load(in_ptr2 + (69632 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp167 = tl.load(in_ptr2 + (73728 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp175 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp176 = tl.load(in_ptr2 + (77824 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp184 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp185 = tl.load(in_ptr2 + (81920 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp193 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp194 = tl.load(in_ptr2 + (86016 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp202 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp203 = tl.load(in_ptr2 + (90112 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp211 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp212 = tl.load(in_ptr2 
+ (94208 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp220 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp221 = tl.load(in_ptr2 + (98304 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp229 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp230 = tl.load(in_ptr2 + (102400 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp239 = tl.load(in_ptr2 + (106496 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp247 = tl.load(in_ptr31 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp248 = tl.load(in_ptr2 + (110592 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp256 = tl.load(in_ptr32 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp257 = tl.load(in_ptr2 + (114688 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11) tmp15 = tmp14 - tmp4 tmp16 = tl_math.exp(tmp15) tmp17 = tmp16 / tmp7 tmp18 = tmp13 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tmp24 = tmp23 - tmp4 tmp25 = tl_math.exp(tmp24) tmp26 = tmp25 / tmp7 tmp27 = tmp22 * tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp33 = tmp32 - tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 / tmp7 tmp36 = tmp31 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = _tmp38 + tmp37 _tmp38 = tl.where(rmask & xmask, tmp39, _tmp38) tmp42 = tmp41 - tmp4 tmp43 = tl_math.exp(tmp42) tmp44 = tmp43 / tmp7 tmp45 = tmp40 * tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp48 = _tmp47 + tmp46 _tmp47 = tl.where(rmask & xmask, tmp48, _tmp47) tmp51 = tmp50 - tmp4 tmp52 = tl_math.exp(tmp51) tmp53 = tmp52 / tmp7 tmp54 = tmp49 * tmp53 tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp57 = _tmp56 + tmp55 _tmp56 = tl.where(rmask & xmask, tmp57, _tmp56) tmp60 = tmp59 - tmp4 tmp61 = tl_math.exp(tmp60) tmp62 = tmp61 / tmp7 tmp63 = tmp58 * tmp62 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = _tmp65 + tmp64 _tmp65 = tl.where(rmask & xmask, tmp66, _tmp65) tmp69 = tmp68 - tmp4 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 / tmp7 tmp72 = tmp67 * tmp71 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = _tmp74 + tmp73 _tmp74 = tl.where(rmask & xmask, tmp75, _tmp74) tmp78 = tmp77 - tmp4 tmp79 = tl_math.exp(tmp78) tmp80 = tmp79 / tmp7 tmp81 = tmp76 * tmp80 tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK]) tmp84 = _tmp83 + tmp82 _tmp83 = tl.where(rmask & xmask, tmp84, _tmp83) tmp87 = tmp86 - tmp4 tmp88 = tl_math.exp(tmp87) tmp89 = tmp88 / tmp7 tmp90 = tmp85 * tmp89 tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK]) tmp93 = _tmp92 + tmp91 _tmp92 = tl.where(rmask & xmask, tmp93, _tmp92) tmp96 = tmp95 - tmp4 tmp97 = tl_math.exp(tmp96) tmp98 = tmp97 / tmp7 tmp99 = tmp94 * tmp98 tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = _tmp101 + tmp100 _tmp101 = tl.where(rmask & xmask, tmp102, _tmp101) tmp105 = tmp104 - tmp4 tmp106 = tl_math.exp(tmp105) 
tmp107 = tmp106 / tmp7 tmp108 = tmp103 * tmp107 tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK]) tmp111 = _tmp110 + tmp109 _tmp110 = tl.where(rmask & xmask, tmp111, _tmp110) tmp114 = tmp113 - tmp4 tmp115 = tl_math.exp(tmp114) tmp116 = tmp115 / tmp7 tmp117 = tmp112 * tmp116 tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK]) tmp120 = _tmp119 + tmp118 _tmp119 = tl.where(rmask & xmask, tmp120, _tmp119) tmp123 = tmp122 - tmp4 tmp124 = tl_math.exp(tmp123) tmp125 = tmp124 / tmp7 tmp126 = tmp121 * tmp125 tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK]) tmp129 = _tmp128 + tmp127 _tmp128 = tl.where(rmask & xmask, tmp129, _tmp128) tmp132 = tmp131 - tmp4 tmp133 = tl_math.exp(tmp132) tmp134 = tmp133 / tmp7 tmp135 = tmp130 * tmp134 tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK]) tmp138 = _tmp137 + tmp136 _tmp137 = tl.where(rmask & xmask, tmp138, _tmp137) tmp141 = tmp140 - tmp4 tmp142 = tl_math.exp(tmp141) tmp143 = tmp142 / tmp7 tmp144 = tmp139 * tmp143 tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = _tmp146 + tmp145 _tmp146 = tl.where(rmask & xmask, tmp147, _tmp146) tmp150 = tmp149 - tmp4 tmp151 = tl_math.exp(tmp150) tmp152 = tmp151 / tmp7 tmp153 = tmp148 * tmp152 tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK]) tmp156 = _tmp155 + tmp154 _tmp155 = tl.where(rmask & xmask, tmp156, _tmp155) tmp159 = tmp158 - tmp4 tmp160 = tl_math.exp(tmp159) tmp161 = tmp160 / tmp7 tmp162 = tmp157 * tmp161 tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK]) tmp165 = _tmp164 + tmp163 _tmp164 = tl.where(rmask & xmask, tmp165, _tmp164) tmp168 = tmp167 - tmp4 tmp169 = tl_math.exp(tmp168) tmp170 = tmp169 / tmp7 tmp171 = tmp166 * tmp170 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = _tmp173 + tmp172 _tmp173 = tl.where(rmask & xmask, tmp174, _tmp173) tmp177 = tmp176 - tmp4 tmp178 = tl_math.exp(tmp177) tmp179 = tmp178 / tmp7 tmp180 = tmp175 * tmp179 tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK]) tmp183 = _tmp182 + tmp181 _tmp182 = tl.where(rmask & xmask, tmp183, _tmp182) tmp186 = tmp185 - tmp4 tmp187 = tl_math.exp(tmp186) tmp188 = tmp187 / tmp7 tmp189 = tmp184 * tmp188 tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK]) tmp192 = _tmp191 + tmp190 _tmp191 = tl.where(rmask & xmask, tmp192, _tmp191) tmp195 = tmp194 - tmp4 tmp196 = tl_math.exp(tmp195) tmp197 = tmp196 / tmp7 tmp198 = tmp193 * tmp197 tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK]) tmp201 = _tmp200 + tmp199 _tmp200 = tl.where(rmask & xmask, tmp201, _tmp200) tmp204 = tmp203 - tmp4 tmp205 = tl_math.exp(tmp204) tmp206 = tmp205 / tmp7 tmp207 = tmp202 * tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = _tmp209 + tmp208 _tmp209 = tl.where(rmask & xmask, tmp210, _tmp209) tmp213 = tmp212 - tmp4 tmp214 = tl_math.exp(tmp213) tmp215 = tmp214 / tmp7 tmp216 = tmp211 * tmp215 tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK]) tmp219 = _tmp218 + tmp217 _tmp218 = tl.where(rmask & xmask, tmp219, _tmp218) tmp222 = tmp221 - tmp4 tmp223 = tl_math.exp(tmp222) tmp224 = tmp223 / tmp7 tmp225 = tmp220 * tmp224 tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK]) tmp228 = _tmp227 + tmp226 _tmp227 = tl.where(rmask & xmask, tmp228, _tmp227) tmp231 = tmp230 - tmp4 tmp232 = tl_math.exp(tmp231) tmp233 = tmp232 / tmp7 tmp234 = tmp229 * tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = _tmp236 + tmp235 _tmp236 = tl.where(rmask & xmask, tmp237, _tmp236) tmp240 = tmp239 - tmp4 tmp241 = tl_math.exp(tmp240) tmp242 = tmp241 / tmp7 tmp243 = tmp238 * tmp242 tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK]) tmp246 = _tmp245 + tmp244 _tmp245 = 
tl.where(rmask & xmask, tmp246, _tmp245) tmp249 = tmp248 - tmp4 tmp250 = tl_math.exp(tmp249) tmp251 = tmp250 / tmp7 tmp252 = tmp247 * tmp251 tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK]) tmp255 = _tmp254 + tmp253 _tmp254 = tl.where(rmask & xmask, tmp255, _tmp254) tmp258 = tmp257 - tmp4 tmp259 = tl_math.exp(tmp258) tmp260 = tmp259 / tmp7 tmp261 = tmp256 * tmp260 tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK]) tmp264 = _tmp263 + tmp262 _tmp263 = tl.where(rmask & xmask, tmp264, _tmp263) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + x3, tmp11, xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr1 + x3, tmp20, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr2 + x3, tmp29, xmask) tmp38 = tl.sum(_tmp38, 1)[:, None] tl.store(out_ptr3 + x3, tmp38, xmask) tmp47 = tl.sum(_tmp47, 1)[:, None] tl.store(out_ptr4 + x3, tmp47, xmask) tmp56 = tl.sum(_tmp56, 1)[:, None] tl.store(out_ptr5 + x3, tmp56, xmask) tmp65 = tl.sum(_tmp65, 1)[:, None] tl.store(out_ptr6 + x3, tmp65, xmask) tmp74 = tl.sum(_tmp74, 1)[:, None] tl.store(out_ptr7 + x3, tmp74, xmask) tmp83 = tl.sum(_tmp83, 1)[:, None] tl.store(out_ptr8 + x3, tmp83, xmask) tmp92 = tl.sum(_tmp92, 1)[:, None] tl.store(out_ptr9 + x3, tmp92, xmask) tmp101 = tl.sum(_tmp101, 1)[:, None] tl.store(out_ptr10 + x3, tmp101, xmask) tmp110 = tl.sum(_tmp110, 1)[:, None] tl.store(out_ptr11 + x3, tmp110, xmask) tmp119 = tl.sum(_tmp119, 1)[:, None] tl.store(out_ptr12 + x3, tmp119, xmask) tmp128 = tl.sum(_tmp128, 1)[:, None] tl.store(out_ptr13 + x3, tmp128, xmask) tmp137 = tl.sum(_tmp137, 1)[:, None] tl.store(out_ptr14 + x3, tmp137, xmask) tmp146 = tl.sum(_tmp146, 1)[:, None] tl.store(out_ptr15 + x3, tmp146, xmask) tmp155 = tl.sum(_tmp155, 1)[:, None] tl.store(out_ptr16 + x3, tmp155, xmask) tmp164 = tl.sum(_tmp164, 1)[:, None] tl.store(out_ptr17 + x3, tmp164, xmask) tmp173 = tl.sum(_tmp173, 1)[:, None] tl.store(out_ptr18 + x3, tmp173, xmask) tmp182 = tl.sum(_tmp182, 1)[:, None] tl.store(out_ptr19 + x3, tmp182, xmask) tmp191 = tl.sum(_tmp191, 1)[:, None] tl.store(out_ptr20 + x3, tmp191, xmask) tmp200 = tl.sum(_tmp200, 1)[:, None] tl.store(out_ptr21 + x3, tmp200, xmask) tmp209 = tl.sum(_tmp209, 1)[:, None] tl.store(out_ptr22 + x3, tmp209, xmask) tmp218 = tl.sum(_tmp218, 1)[:, None] tl.store(out_ptr23 + x3, tmp218, xmask) tmp227 = tl.sum(_tmp227, 1)[:, None] tl.store(out_ptr24 + x3, tmp227, xmask) tmp236 = tl.sum(_tmp236, 1)[:, None] tl.store(out_ptr25 + x3, tmp236, xmask) tmp245 = tl.sum(_tmp245, 1)[:, None] tl.store(out_ptr26 + x3, tmp245, xmask) tmp254 = tl.sum(_tmp254, 1)[:, None] tl.store(out_ptr27 + x3, tmp254, xmask) tmp263 = tl.sum(_tmp263, 1)[:, None] tl.store(out_ptr28 + x3, tmp263, xmask) @triton.jit def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, 
RBLOCK)[None, :] x3 = xindex x1 = xindex // 128 _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (118784 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (122880 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (126976 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (131072 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (135168 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (139264 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (143360 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp66 = tl.load(in_ptr1 + (147456 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', 
other=0.0) tmp75 = tl.load(in_ptr1 + (151552 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp83 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp84 = tl.load(in_ptr1 + (155648 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp92 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp93 = tl.load(in_ptr1 + (159744 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp101 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp102 = tl.load(in_ptr1 + (163840 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp110 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp111 = tl.load(in_ptr1 + (167936 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp119 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp120 = tl.load(in_ptr1 + (172032 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp128 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp129 = tl.load(in_ptr1 + (176128 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp138 = tl.load(in_ptr1 + (180224 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp147 = tl.load(in_ptr1 + (184320 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp155 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp156 = tl.load(in_ptr1 + (188416 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp164 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp165 = tl.load(in_ptr1 + (192512 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp173 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp174 = tl.load(in_ptr1 + (196608 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp182 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp183 = tl.load(in_ptr1 + (200704 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp191 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp192 = tl.load(in_ptr1 + (204800 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp200 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp201 = tl.load(in_ptr1 + (208896 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp210 = tl.load(in_ptr1 + (212992 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp219 = tl.load(in_ptr1 + (217088 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', 
other=0.0) tmp227 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp228 = tl.load(in_ptr1 + (221184 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp236 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp237 = tl.load(in_ptr1 + (225280 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp245 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp246 = tl.load(in_ptr1 + (229376 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp67 = tmp66 - tmp2 tmp68 = tl_math.exp(tmp67) tmp69 = tmp68 / tmp5 tmp70 = tmp65 * tmp69 tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK]) tmp73 = _tmp72 + tmp71 _tmp72 = tl.where(rmask & xmask, tmp73, _tmp72) tmp76 = tmp75 - tmp2 tmp77 = tl_math.exp(tmp76) tmp78 = tmp77 / tmp5 tmp79 = tmp74 * tmp78 tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK]) tmp82 = _tmp81 + tmp80 _tmp81 = tl.where(rmask & xmask, tmp82, _tmp81) tmp85 = tmp84 - tmp2 tmp86 = tl_math.exp(tmp85) tmp87 = tmp86 / tmp5 tmp88 = tmp83 * tmp87 tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK]) tmp91 = _tmp90 + tmp89 _tmp90 = tl.where(rmask & xmask, tmp91, _tmp90) tmp94 = tmp93 - tmp2 tmp95 = tl_math.exp(tmp94) tmp96 = tmp95 / tmp5 tmp97 = tmp92 * tmp96 tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK]) tmp100 = _tmp99 + tmp98 _tmp99 = tl.where(rmask & xmask, tmp100, _tmp99) tmp103 = tmp102 - tmp2 tmp104 = tl_math.exp(tmp103) tmp105 = tmp104 / tmp5 tmp106 = tmp101 * tmp105 tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK]) tmp109 = _tmp108 + tmp107 _tmp108 = tl.where(rmask & xmask, tmp109, _tmp108) tmp112 = tmp111 - tmp2 tmp113 = tl_math.exp(tmp112) tmp114 = tmp113 / tmp5 tmp115 = tmp110 * tmp114 tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK]) tmp118 = _tmp117 + tmp116 _tmp117 = tl.where(rmask & xmask, tmp118, _tmp117) tmp121 = tmp120 - tmp2 tmp122 = tl_math.exp(tmp121) tmp123 = tmp122 / tmp5 tmp124 = tmp119 * tmp123 tmp125 = tl.broadcast_to(tmp124, 
[XBLOCK, RBLOCK]) tmp127 = _tmp126 + tmp125 _tmp126 = tl.where(rmask & xmask, tmp127, _tmp126) tmp130 = tmp129 - tmp2 tmp131 = tl_math.exp(tmp130) tmp132 = tmp131 / tmp5 tmp133 = tmp128 * tmp132 tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK]) tmp136 = _tmp135 + tmp134 _tmp135 = tl.where(rmask & xmask, tmp136, _tmp135) tmp139 = tmp138 - tmp2 tmp140 = tl_math.exp(tmp139) tmp141 = tmp140 / tmp5 tmp142 = tmp137 * tmp141 tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK]) tmp145 = _tmp144 + tmp143 _tmp144 = tl.where(rmask & xmask, tmp145, _tmp144) tmp148 = tmp147 - tmp2 tmp149 = tl_math.exp(tmp148) tmp150 = tmp149 / tmp5 tmp151 = tmp146 * tmp150 tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK]) tmp154 = _tmp153 + tmp152 _tmp153 = tl.where(rmask & xmask, tmp154, _tmp153) tmp157 = tmp156 - tmp2 tmp158 = tl_math.exp(tmp157) tmp159 = tmp158 / tmp5 tmp160 = tmp155 * tmp159 tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK]) tmp163 = _tmp162 + tmp161 _tmp162 = tl.where(rmask & xmask, tmp163, _tmp162) tmp166 = tmp165 - tmp2 tmp167 = tl_math.exp(tmp166) tmp168 = tmp167 / tmp5 tmp169 = tmp164 * tmp168 tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK]) tmp172 = _tmp171 + tmp170 _tmp171 = tl.where(rmask & xmask, tmp172, _tmp171) tmp175 = tmp174 - tmp2 tmp176 = tl_math.exp(tmp175) tmp177 = tmp176 / tmp5 tmp178 = tmp173 * tmp177 tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK]) tmp181 = _tmp180 + tmp179 _tmp180 = tl.where(rmask & xmask, tmp181, _tmp180) tmp184 = tmp183 - tmp2 tmp185 = tl_math.exp(tmp184) tmp186 = tmp185 / tmp5 tmp187 = tmp182 * tmp186 tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK]) tmp190 = _tmp189 + tmp188 _tmp189 = tl.where(rmask & xmask, tmp190, _tmp189) tmp193 = tmp192 - tmp2 tmp194 = tl_math.exp(tmp193) tmp195 = tmp194 / tmp5 tmp196 = tmp191 * tmp195 tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK]) tmp199 = _tmp198 + tmp197 _tmp198 = tl.where(rmask & xmask, tmp199, _tmp198) tmp202 = tmp201 - tmp2 tmp203 = tl_math.exp(tmp202) tmp204 = tmp203 / tmp5 tmp205 = tmp200 * tmp204 tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK]) tmp208 = _tmp207 + tmp206 _tmp207 = tl.where(rmask & xmask, tmp208, _tmp207) tmp211 = tmp210 - tmp2 tmp212 = tl_math.exp(tmp211) tmp213 = tmp212 / tmp5 tmp214 = tmp209 * tmp213 tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK]) tmp217 = _tmp216 + tmp215 _tmp216 = tl.where(rmask & xmask, tmp217, _tmp216) tmp220 = tmp219 - tmp2 tmp221 = tl_math.exp(tmp220) tmp222 = tmp221 / tmp5 tmp223 = tmp218 * tmp222 tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK]) tmp226 = _tmp225 + tmp224 _tmp225 = tl.where(rmask & xmask, tmp226, _tmp225) tmp229 = tmp228 - tmp2 tmp230 = tl_math.exp(tmp229) tmp231 = tmp230 / tmp5 tmp232 = tmp227 * tmp231 tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK]) tmp235 = _tmp234 + tmp233 _tmp234 = tl.where(rmask & xmask, tmp235, _tmp234) tmp238 = tmp237 - tmp2 tmp239 = tl_math.exp(tmp238) tmp240 = tmp239 / tmp5 tmp241 = tmp236 * tmp240 tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK]) tmp244 = _tmp243 + tmp242 _tmp243 = tl.where(rmask & xmask, tmp244, _tmp243) tmp247 = tmp246 - tmp2 tmp248 = tl_math.exp(tmp247) tmp249 = tmp248 / tmp5 tmp250 = tmp245 * tmp249 tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK]) tmp253 = _tmp252 + tmp251 _tmp252 = tl.where(rmask & xmask, tmp253, _tmp252) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + x3, tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + x3, tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] 
tl.store(out_ptr3 + x3, tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + x3, tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + x3, tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + x3, tmp63, xmask) tmp72 = tl.sum(_tmp72, 1)[:, None] tl.store(out_ptr7 + x3, tmp72, xmask) tmp81 = tl.sum(_tmp81, 1)[:, None] tl.store(out_ptr8 + x3, tmp81, xmask) tmp90 = tl.sum(_tmp90, 1)[:, None] tl.store(out_ptr9 + x3, tmp90, xmask) tmp99 = tl.sum(_tmp99, 1)[:, None] tl.store(out_ptr10 + x3, tmp99, xmask) tmp108 = tl.sum(_tmp108, 1)[:, None] tl.store(out_ptr11 + x3, tmp108, xmask) tmp117 = tl.sum(_tmp117, 1)[:, None] tl.store(out_ptr12 + x3, tmp117, xmask) tmp126 = tl.sum(_tmp126, 1)[:, None] tl.store(out_ptr13 + x3, tmp126, xmask) tmp135 = tl.sum(_tmp135, 1)[:, None] tl.store(out_ptr14 + x3, tmp135, xmask) tmp144 = tl.sum(_tmp144, 1)[:, None] tl.store(out_ptr15 + x3, tmp144, xmask) tmp153 = tl.sum(_tmp153, 1)[:, None] tl.store(out_ptr16 + x3, tmp153, xmask) tmp162 = tl.sum(_tmp162, 1)[:, None] tl.store(out_ptr17 + x3, tmp162, xmask) tmp171 = tl.sum(_tmp171, 1)[:, None] tl.store(out_ptr18 + x3, tmp171, xmask) tmp180 = tl.sum(_tmp180, 1)[:, None] tl.store(out_ptr19 + x3, tmp180, xmask) tmp189 = tl.sum(_tmp189, 1)[:, None] tl.store(out_ptr20 + x3, tmp189, xmask) tmp198 = tl.sum(_tmp198, 1)[:, None] tl.store(out_ptr21 + x3, tmp198, xmask) tmp207 = tl.sum(_tmp207, 1)[:, None] tl.store(out_ptr22 + x3, tmp207, xmask) tmp216 = tl.sum(_tmp216, 1)[:, None] tl.store(out_ptr23 + x3, tmp216, xmask) tmp225 = tl.sum(_tmp225, 1)[:, None] tl.store(out_ptr24 + x3, tmp225, xmask) tmp234 = tl.sum(_tmp234, 1)[:, None] tl.store(out_ptr25 + x3, tmp234, xmask) tmp243 = tl.sum(_tmp243, 1)[:, None] tl.store(out_ptr26 + x3, tmp243, xmask) tmp252 = tl.sum(_tmp252, 1)[:, None] tl.store(out_ptr27 + x3, tmp252, xmask) @triton.jit def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = xindex // 128 _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (233472 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (237568 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (241664 + r2 + 262144 * x1), 
rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (245760 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (249856 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (253952 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (258048 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + x3, tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + x3, tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + x3, tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + x3, tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + x3, tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + x3, tmp63, xmask) @triton.jit def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, 
in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 64 r2 = rindex x1 = xindex // 64 x3 = xindex tmp0 = x0 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (r2 + 128 * x1), tmp5 & xmask, eviction_policy ='evict_last', other=0.0) tmp7 = tl.full([1, 1], 3, tl.int64) tmp8 = tmp0 >= tmp7 tmp9 = tmp0 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tl.load(in_ptr1 + (r2 + 128 * x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.full([1, 1], 2, tl.int64) tmp13 = tmp0 >= tmp12 tmp14 = tmp0 < tmp7 tmp15 = tmp13 & tmp14 tmp16 = tl.load(in_ptr2 + (r2 + 128 * x1), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.full([1, 1], 1, tl.int64) tmp18 = tmp0 >= tmp17 tmp19 = tmp0 < tmp12 tmp20 = tmp18 & tmp19 tmp21 = tl.load(in_ptr3 + (r2 + 128 * x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp0 < tmp17 tmp23 = tl.load(in_ptr4 + (r2 + 128 * x1), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = 0.0 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tl.where(tmp20, tmp21, tmp25) tmp27 = tl.where(tmp15, tmp16, tmp26) tmp28 = tl.where(tmp10, tmp11, tmp27) tmp29 = tl.where(tmp5, tmp6, tmp28) tmp30 = tl.full([1, 1], 8, tl.int64) tmp31 = tmp0 >= tmp30 tmp32 = tl.full([1, 1], 9, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tmp31 & tmp33 tmp35 = tl.load(in_ptr5 + (r2 + 128 * x1), tmp34 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tl.full([1, 1], 7, tl.int64) tmp37 = tmp0 >= tmp36 tmp38 = tmp0 < tmp30 tmp39 = tmp37 & tmp38 tmp40 = tl.load(in_ptr6 + (r2 + 128 * x1), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = tl.full([1, 1], 6, tl.int64) tmp42 = tmp0 >= tmp41 tmp43 = tmp0 < tmp36 tmp44 = tmp42 & tmp43 tmp45 = tl.load(in_ptr7 + (r2 + 128 * x1), tmp44 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tmp0 >= tmp3 tmp47 = tmp0 < tmp41 tmp48 = tmp46 & tmp47 tmp49 = tl.load(in_ptr8 + (r2 + 128 * x1), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.where(tmp48, tmp49, tmp29) tmp51 = tl.where(tmp44, tmp45, tmp50) tmp52 = tl.where(tmp39, tmp40, tmp51) tmp53 = tl.where(tmp34, tmp35, tmp52) tmp54 = tl.full([1, 1], 12, tl.int64) tmp55 = tmp0 >= tmp54 tmp56 = tl.full([1, 1], 13, tl.int64) tmp57 = tmp0 < tmp56 tmp58 = tmp55 & tmp57 tmp59 = tl.load(in_ptr9 + (r2 + 128 * x1), tmp58 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.full([1, 1], 11, tl.int64) tmp61 = tmp0 >= tmp60 tmp62 = tmp0 < tmp54 tmp63 = tmp61 & tmp62 tmp64 = tl.load(in_ptr10 + (r2 + 128 * x1), tmp63 & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.full([1, 1], 10, tl.int64) tmp66 = tmp0 >= tmp65 tmp67 = tmp0 < tmp60 tmp68 = tmp66 & tmp67 tmp69 = tl.load(in_ptr11 + (r2 + 128 * x1), tmp68 & xmask, eviction_policy='evict_last', other=0.0) tmp70 = tmp0 >= tmp32 tmp71 = tmp0 < tmp65 tmp72 = tmp70 & tmp71 tmp73 = tl.load(in_ptr12 + (r2 + 128 * x1), tmp72 & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.where(tmp72, tmp73, tmp53) tmp75 = tl.where(tmp68, tmp69, tmp74) tmp76 = tl.where(tmp63, tmp64, tmp75) tmp77 = tl.where(tmp58, tmp59, tmp76) tmp78 = tl.full([1, 1], 16, tl.int64) tmp79 = tmp0 >= tmp78 tmp80 = tl.full([1, 1], 17, tl.int64) tmp81 = tmp0 < 
tmp80 tmp82 = tmp79 & tmp81 tmp83 = tl.load(in_ptr13 + (r2 + 128 * x1), tmp82 & xmask, eviction_policy='evict_last', other=0.0) tmp84 = tl.full([1, 1], 15, tl.int64) tmp85 = tmp0 >= tmp84 tmp86 = tmp0 < tmp78 tmp87 = tmp85 & tmp86 tmp88 = tl.load(in_ptr14 + (r2 + 128 * x1), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full([1, 1], 14, tl.int64) tmp90 = tmp0 >= tmp89 tmp91 = tmp0 < tmp84 tmp92 = tmp90 & tmp91 tmp93 = tl.load(in_ptr15 + (r2 + 128 * x1), tmp92 & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tmp0 >= tmp56 tmp95 = tmp0 < tmp89 tmp96 = tmp94 & tmp95 tmp97 = tl.load(in_ptr16 + (r2 + 128 * x1), tmp96 & xmask, eviction_policy='evict_last', other=0.0) tmp98 = tl.where(tmp96, tmp97, tmp77) tmp99 = tl.where(tmp92, tmp93, tmp98) tmp100 = tl.where(tmp87, tmp88, tmp99) tmp101 = tl.where(tmp82, tmp83, tmp100) tmp102 = tl.full([1, 1], 20, tl.int64) tmp103 = tmp0 >= tmp102 tmp104 = tl.full([1, 1], 21, tl.int64) tmp105 = tmp0 < tmp104 tmp106 = tmp103 & tmp105 tmp107 = tl.load(in_ptr17 + (r2 + 128 * x1), tmp106 & xmask, eviction_policy='evict_last', other=0.0) tmp108 = tl.full([1, 1], 19, tl.int64) tmp109 = tmp0 >= tmp108 tmp110 = tmp0 < tmp102 tmp111 = tmp109 & tmp110 tmp112 = tl.load(in_ptr18 + (r2 + 128 * x1), tmp111 & xmask, eviction_policy='evict_last', other=0.0) tmp113 = tl.full([1, 1], 18, tl.int64) tmp114 = tmp0 >= tmp113 tmp115 = tmp0 < tmp108 tmp116 = tmp114 & tmp115 tmp117 = tl.load(in_ptr19 + (r2 + 128 * x1), tmp116 & xmask, eviction_policy='evict_last', other=0.0) tmp118 = tmp0 >= tmp80 tmp119 = tmp0 < tmp113 tmp120 = tmp118 & tmp119 tmp121 = tl.load(in_ptr20 + (r2 + 128 * x1), tmp120 & xmask, eviction_policy='evict_last', other=0.0) tmp122 = tl.where(tmp120, tmp121, tmp101) tmp123 = tl.where(tmp116, tmp117, tmp122) tmp124 = tl.where(tmp111, tmp112, tmp123) tmp125 = tl.where(tmp106, tmp107, tmp124) tmp126 = tl.full([1, 1], 24, tl.int64) tmp127 = tmp0 >= tmp126 tmp128 = tl.full([1, 1], 25, tl.int64) tmp129 = tmp0 < tmp128 tmp130 = tmp127 & tmp129 tmp131 = tl.load(in_ptr21 + (r2 + 128 * x1), tmp130 & xmask, eviction_policy='evict_last', other=0.0) tmp132 = tl.full([1, 1], 23, tl.int64) tmp133 = tmp0 >= tmp132 tmp134 = tmp0 < tmp126 tmp135 = tmp133 & tmp134 tmp136 = tl.load(in_ptr22 + (r2 + 128 * x1), tmp135 & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.full([1, 1], 22, tl.int64) tmp138 = tmp0 >= tmp137 tmp139 = tmp0 < tmp132 tmp140 = tmp138 & tmp139 tmp141 = tl.load(in_ptr23 + (r2 + 128 * x1), tmp140 & xmask, eviction_policy='evict_last', other=0.0) tmp142 = tmp0 >= tmp104 tmp143 = tmp0 < tmp137 tmp144 = tmp142 & tmp143 tmp145 = tl.load(in_ptr24 + (r2 + 128 * x1), tmp144 & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.where(tmp144, tmp145, tmp125) tmp147 = tl.where(tmp140, tmp141, tmp146) tmp148 = tl.where(tmp135, tmp136, tmp147) tmp149 = tl.where(tmp130, tmp131, tmp148) tmp150 = tl.full([1, 1], 28, tl.int64) tmp151 = tmp0 >= tmp150 tmp152 = tl.full([1, 1], 29, tl.int64) tmp153 = tmp0 < tmp152 tmp154 = tmp151 & tmp153 tmp155 = tl.load(in_ptr25 + (r2 + 128 * x1), tmp154 & xmask, eviction_policy='evict_last', other=0.0) tmp156 = tl.full([1, 1], 27, tl.int64) tmp157 = tmp0 >= tmp156 tmp158 = tmp0 < tmp150 tmp159 = tmp157 & tmp158 tmp160 = tl.load(in_ptr26 + (r2 + 128 * x1), tmp159 & xmask, eviction_policy='evict_last', other=0.0) tmp161 = tl.full([1, 1], 26, tl.int64) tmp162 = tmp0 >= tmp161 tmp163 = tmp0 < tmp156 tmp164 = tmp162 & tmp163 tmp165 = tl.load(in_ptr27 + (r2 + 128 * x1), tmp164 & xmask, 
eviction_policy='evict_last', other=0.0) tmp166 = tmp0 >= tmp128 tmp167 = tmp0 < tmp161 tmp168 = tmp166 & tmp167 tmp169 = tl.load(in_ptr28 + (r2 + 128 * x1), tmp168 & xmask, eviction_policy='evict_last', other=0.0) tmp170 = tl.where(tmp168, tmp169, tmp149) tmp171 = tl.where(tmp164, tmp165, tmp170) tmp172 = tl.where(tmp159, tmp160, tmp171) tmp173 = tl.where(tmp154, tmp155, tmp172) tmp174 = tl.full([1, 1], 32, tl.int64) tmp175 = tmp0 >= tmp174 tmp176 = tl.full([1, 1], 33, tl.int64) tmp177 = tmp0 < tmp176 tmp178 = tmp175 & tmp177 tmp179 = tl.load(in_ptr29 + (r2 + 128 * x1), tmp178 & xmask, eviction_policy='evict_last', other=0.0) tmp180 = tl.full([1, 1], 31, tl.int64) tmp181 = tmp0 >= tmp180 tmp182 = tmp0 < tmp174 tmp183 = tmp181 & tmp182 tmp184 = tl.load(in_ptr30 + (r2 + 128 * x1), tmp183 & xmask, eviction_policy='evict_last', other=0.0) tmp185 = tl.full([1, 1], 30, tl.int64) tmp186 = tmp0 >= tmp185 tmp187 = tmp0 < tmp180 tmp188 = tmp186 & tmp187 tmp189 = tl.load(in_ptr31 + (r2 + 128 * x1), tmp188 & xmask, eviction_policy='evict_last', other=0.0) tmp190 = tmp0 >= tmp152 tmp191 = tmp0 < tmp185 tmp192 = tmp190 & tmp191 tmp193 = tl.load(in_ptr32 + (r2 + 128 * x1), tmp192 & xmask, eviction_policy='evict_last', other=0.0) tmp194 = tl.where(tmp192, tmp193, tmp173) tmp195 = tl.where(tmp188, tmp189, tmp194) tmp196 = tl.where(tmp183, tmp184, tmp195) tmp197 = tl.where(tmp178, tmp179, tmp196) tmp198 = tl.full([1, 1], 36, tl.int64) tmp199 = tmp0 >= tmp198 tmp200 = tl.full([1, 1], 37, tl.int64) tmp201 = tmp0 < tmp200 tmp202 = tmp199 & tmp201 tmp203 = tl.load(in_ptr33 + (r2 + 128 * x1), tmp202 & xmask, eviction_policy='evict_last', other=0.0) tmp204 = tl.full([1, 1], 35, tl.int64) tmp205 = tmp0 >= tmp204 tmp206 = tmp0 < tmp198 tmp207 = tmp205 & tmp206 tmp208 = tl.load(in_ptr34 + (r2 + 128 * x1), tmp207 & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.full([1, 1], 34, tl.int64) tmp210 = tmp0 >= tmp209 tmp211 = tmp0 < tmp204 tmp212 = tmp210 & tmp211 tmp213 = tl.load(in_ptr35 + (r2 + 128 * x1), tmp212 & xmask, eviction_policy='evict_last', other=0.0) tmp214 = tmp0 >= tmp176 tmp215 = tmp0 < tmp209 tmp216 = tmp214 & tmp215 tmp217 = tl.load(in_ptr36 + (r2 + 128 * x1), tmp216 & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.where(tmp216, tmp217, tmp197) tmp219 = tl.where(tmp212, tmp213, tmp218) tmp220 = tl.where(tmp207, tmp208, tmp219) tmp221 = tl.where(tmp202, tmp203, tmp220) tmp222 = tl.full([1, 1], 40, tl.int64) tmp223 = tmp0 >= tmp222 tmp224 = tl.full([1, 1], 41, tl.int64) tmp225 = tmp0 < tmp224 tmp226 = tmp223 & tmp225 tmp227 = tl.load(in_ptr37 + (r2 + 128 * x1), tmp226 & xmask, eviction_policy='evict_last', other=0.0) tmp228 = tl.full([1, 1], 39, tl.int64) tmp229 = tmp0 >= tmp228 tmp230 = tmp0 < tmp222 tmp231 = tmp229 & tmp230 tmp232 = tl.load(in_ptr38 + (r2 + 128 * x1), tmp231 & xmask, eviction_policy='evict_last', other=0.0) tmp233 = tl.full([1, 1], 38, tl.int64) tmp234 = tmp0 >= tmp233 tmp235 = tmp0 < tmp228 tmp236 = tmp234 & tmp235 tmp237 = tl.load(in_ptr39 + (r2 + 128 * x1), tmp236 & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tmp0 >= tmp200 tmp239 = tmp0 < tmp233 tmp240 = tmp238 & tmp239 tmp241 = tl.load(in_ptr40 + (r2 + 128 * x1), tmp240 & xmask, eviction_policy='evict_last', other=0.0) tmp242 = tl.where(tmp240, tmp241, tmp221) tmp243 = tl.where(tmp236, tmp237, tmp242) tmp244 = tl.where(tmp231, tmp232, tmp243) tmp245 = tl.where(tmp226, tmp227, tmp244) tmp246 = tl.full([1, 1], 44, tl.int64) tmp247 = tmp0 >= tmp246 tmp248 = tl.full([1, 1], 45, tl.int64) tmp249 = 
tmp0 < tmp248 tmp250 = tmp247 & tmp249 tmp251 = tl.load(in_ptr41 + (r2 + 128 * x1), tmp250 & xmask, eviction_policy='evict_last', other=0.0) tmp252 = tl.full([1, 1], 43, tl.int64) tmp253 = tmp0 >= tmp252 tmp254 = tmp0 < tmp246 tmp255 = tmp253 & tmp254 tmp256 = tl.load(in_ptr42 + (r2 + 128 * x1), tmp255 & xmask, eviction_policy='evict_last', other=0.0) tmp257 = tl.full([1, 1], 42, tl.int64) tmp258 = tmp0 >= tmp257 tmp259 = tmp0 < tmp252 tmp260 = tmp258 & tmp259 tmp261 = tl.load(in_ptr43 + (r2 + 128 * x1), tmp260 & xmask, eviction_policy='evict_last', other=0.0) tmp262 = tmp0 >= tmp224 tmp263 = tmp0 < tmp257 tmp264 = tmp262 & tmp263 tmp265 = tl.load(in_ptr44 + (r2 + 128 * x1), tmp264 & xmask, eviction_policy='evict_last', other=0.0) tmp266 = tl.where(tmp264, tmp265, tmp245) tmp267 = tl.where(tmp260, tmp261, tmp266) tmp268 = tl.where(tmp255, tmp256, tmp267) tmp269 = tl.where(tmp250, tmp251, tmp268) tmp270 = tl.full([1, 1], 48, tl.int64) tmp271 = tmp0 >= tmp270 tmp272 = tl.full([1, 1], 49, tl.int64) tmp273 = tmp0 < tmp272 tmp274 = tmp271 & tmp273 tmp275 = tl.load(in_ptr45 + (r2 + 128 * x1), tmp274 & xmask, eviction_policy='evict_last', other=0.0) tmp276 = tl.full([1, 1], 47, tl.int64) tmp277 = tmp0 >= tmp276 tmp278 = tmp0 < tmp270 tmp279 = tmp277 & tmp278 tmp280 = tl.load(in_ptr46 + (r2 + 128 * x1), tmp279 & xmask, eviction_policy='evict_last', other=0.0) tmp281 = tl.full([1, 1], 46, tl.int64) tmp282 = tmp0 >= tmp281 tmp283 = tmp0 < tmp276 tmp284 = tmp282 & tmp283 tmp285 = tl.load(in_ptr47 + (r2 + 128 * x1), tmp284 & xmask, eviction_policy='evict_last', other=0.0) tmp286 = tmp0 >= tmp248 tmp287 = tmp0 < tmp281 tmp288 = tmp286 & tmp287 tmp289 = tl.load(in_ptr48 + (r2 + 128 * x1), tmp288 & xmask, eviction_policy='evict_last', other=0.0) tmp290 = tl.where(tmp288, tmp289, tmp269) tmp291 = tl.where(tmp284, tmp285, tmp290) tmp292 = tl.where(tmp279, tmp280, tmp291) tmp293 = tl.where(tmp274, tmp275, tmp292) tmp294 = tl.full([1, 1], 52, tl.int64) tmp295 = tmp0 >= tmp294 tmp296 = tl.full([1, 1], 53, tl.int64) tmp297 = tmp0 < tmp296 tmp298 = tmp295 & tmp297 tmp299 = tl.load(in_ptr49 + (r2 + 128 * x1), tmp298 & xmask, eviction_policy='evict_last', other=0.0) tmp300 = tl.full([1, 1], 51, tl.int64) tmp301 = tmp0 >= tmp300 tmp302 = tmp0 < tmp294 tmp303 = tmp301 & tmp302 tmp304 = tl.load(in_ptr50 + (r2 + 128 * x1), tmp303 & xmask, eviction_policy='evict_last', other=0.0) tmp305 = tl.full([1, 1], 50, tl.int64) tmp306 = tmp0 >= tmp305 tmp307 = tmp0 < tmp300 tmp308 = tmp306 & tmp307 tmp309 = tl.load(in_ptr51 + (r2 + 128 * x1), tmp308 & xmask, eviction_policy='evict_last', other=0.0) tmp310 = tmp0 >= tmp272 tmp311 = tmp0 < tmp305 tmp312 = tmp310 & tmp311 tmp313 = tl.load(in_ptr52 + (r2 + 128 * x1), tmp312 & xmask, eviction_policy='evict_last', other=0.0) tmp314 = tl.where(tmp312, tmp313, tmp293) tmp315 = tl.where(tmp308, tmp309, tmp314) tmp316 = tl.where(tmp303, tmp304, tmp315) tmp317 = tl.where(tmp298, tmp299, tmp316) tmp318 = tl.full([1, 1], 56, tl.int64) tmp319 = tmp0 >= tmp318 tmp320 = tl.full([1, 1], 57, tl.int64) tmp321 = tmp0 < tmp320 tmp322 = tmp319 & tmp321 tmp323 = tl.load(in_ptr53 + (r2 + 128 * x1), tmp322 & xmask, eviction_policy='evict_last', other=0.0) tmp324 = tl.full([1, 1], 55, tl.int64) tmp325 = tmp0 >= tmp324 tmp326 = tmp0 < tmp318 tmp327 = tmp325 & tmp326 tmp328 = tl.load(in_ptr54 + (r2 + 128 * x1), tmp327 & xmask, eviction_policy='evict_last', other=0.0) tmp329 = tl.full([1, 1], 54, tl.int64) tmp330 = tmp0 >= tmp329 tmp331 = tmp0 < tmp324 tmp332 = tmp330 & tmp331 tmp333 = tl.load(in_ptr55 + 
(r2 + 128 * x1), tmp332 & xmask, eviction_policy='evict_last', other=0.0) tmp334 = tmp0 >= tmp296 tmp335 = tmp0 < tmp329 tmp336 = tmp334 & tmp335 tmp337 = tl.load(in_ptr56 + (r2 + 128 * x1), tmp336 & xmask, eviction_policy='evict_last', other=0.0) tmp338 = tl.where(tmp336, tmp337, tmp317) tmp339 = tl.where(tmp332, tmp333, tmp338) tmp340 = tl.where(tmp327, tmp328, tmp339) tmp341 = tl.where(tmp322, tmp323, tmp340) tmp342 = tl.full([1, 1], 60, tl.int64) tmp343 = tmp0 >= tmp342 tmp344 = tl.full([1, 1], 61, tl.int64) tmp345 = tmp0 < tmp344 tmp346 = tmp343 & tmp345 tmp347 = tl.load(in_ptr57 + (r2 + 128 * x1), tmp346 & xmask, eviction_policy='evict_last', other=0.0) tmp348 = tl.full([1, 1], 59, tl.int64) tmp349 = tmp0 >= tmp348 tmp350 = tmp0 < tmp342 tmp351 = tmp349 & tmp350 tmp352 = tl.load(in_ptr58 + (r2 + 128 * x1), tmp351 & xmask, eviction_policy='evict_last', other=0.0) tmp353 = tl.full([1, 1], 58, tl.int64) tmp354 = tmp0 >= tmp353 tmp355 = tmp0 < tmp348 tmp356 = tmp354 & tmp355 tmp357 = tl.load(in_ptr59 + (r2 + 128 * x1), tmp356 & xmask, eviction_policy='evict_last', other=0.0) tmp358 = tmp0 >= tmp320 tmp359 = tmp0 < tmp353 tmp360 = tmp358 & tmp359 tmp361 = tl.load(in_ptr60 + (r2 + 128 * x1), tmp360 & xmask, eviction_policy='evict_last', other=0.0) tmp362 = tl.where(tmp360, tmp361, tmp341) tmp363 = tl.where(tmp356, tmp357, tmp362) tmp364 = tl.where(tmp351, tmp352, tmp363) tmp365 = tl.where(tmp346, tmp347, tmp364) tmp366 = tl.full([1, 1], 63, tl.int64) tmp367 = tmp0 >= tmp366 tmp368 = tl.load(in_ptr61 + (r2 + 128 * x1), tmp367 & xmask, eviction_policy='evict_last', other=0.0) tmp369 = tl.full([1, 1], 62, tl.int64) tmp370 = tmp0 >= tmp369 tmp371 = tmp0 < tmp366 tmp372 = tmp370 & tmp371 tmp373 = tl.load(in_ptr62 + (r2 + 128 * x1), tmp372 & xmask, eviction_policy='evict_last', other=0.0) tmp374 = tmp0 >= tmp344 tmp375 = tmp0 < tmp369 tmp376 = tmp374 & tmp375 tmp377 = tl.load(in_ptr63 + (r2 + 128 * x1), tmp376 & xmask, eviction_policy='evict_last', other=0.0) tmp378 = tl.where(tmp376, tmp377, tmp365) tmp379 = tl.where(tmp372, tmp373, tmp378) tmp380 = tl.where(tmp367, tmp368, tmp379) tmp381 = tmp380 * tmp380 tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK]) tmp384 = tl.where(xmask, tmp382, 0) tmp385 = tl.sum(tmp384, 1)[:, None] tmp386 = libdevice.sqrt(tmp385) tl.store(in_out_ptr0 + (r2 + 128 * x3), tmp380, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp386, xmask) @triton.jit def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 4 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) tmp7 = tl.sum(_tmp7, 1)[:, None] tmp9 = libdevice.sqrt(tmp7) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = 
tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp10 / tmp13 tmp15 = triton_helpers.maximum(tmp9, tmp12) tmp16 = tmp14 / tmp15 tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_3, (64, 128), (128, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) get_raw_stream(0) triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0, 16384, 128, XBLOCK=64, RBLOCK=4, num_warps=8, num_stages=1) buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf60 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf69 = empty_strided_cuda((4, 1, 128, 
4096), (524288, 2097152, 4096, 1), torch.float32) buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) triton_poi_fused_div_sub_1[grid(2097152)](primals_1, buf0, primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, 
buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, 2097152, XBLOCK=512, num_warps= 8, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) triton_per_fused__softmax_2[grid(16384)](buf2, buf3, buf4, 16384, 64, XBLOCK=8, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf52 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sub_sum_3[grid(512)](buf1, primals_3, buf2, buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, 512, 4096, XBLOCK=1, RBLOCK= 1024, num_warps=16, num_stages=1) buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf81 = 
empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf126 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sum_4[grid(512)](buf69, buf2, buf3, buf4, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, 512, 4096, XBLOCK=1, RBLOCK= 1024, num_warps=16, num_stages=1) buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sum_5[grid(512)](buf132, buf2, buf3, buf4, buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135, buf137, buf139, buf142, buf144, buf146, 512, 4096, XBLOCK=1, RBLOCK=1024, num_warps=16, num_stages=1) buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32) buf23 = buf14 del buf14 buf32 = buf23 del buf23 buf41 = buf32 del buf32 buf50 = buf41 del buf41 buf59 = buf50 del buf50 buf68 = buf59 del buf59 buf77 = buf68 del buf68 buf86 = buf77 del buf77 buf95 = buf86 del buf86 buf104 = buf95 del buf95 buf113 = buf104 del buf104 buf122 = buf113 del buf113 buf131 = buf122 del buf122 buf140 = buf131 del buf131 buf147 = buf140 del buf140 buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32) buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0) del buf148 triton_per_fused_copy_linalg_vector_norm_zeros_6[grid(256)](buf147, buf149, 
buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18, buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34, buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67, buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83, buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99, buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117, buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135, buf133, buf146, buf144, buf142, 256, 128, XBLOCK=1, num_warps=2, num_stages=1) del buf101 del buf103 del buf106 del buf108 del buf11 del buf110 del buf112 del buf115 del buf117 del buf119 del buf121 del buf124 del buf126 del buf128 del buf13 del buf130 del buf133 del buf135 del buf137 del buf139 del buf142 del buf144 del buf146 del buf16 del buf18 del buf20 del buf22 del buf25 del buf27 del buf29 del buf31 del buf34 del buf36 del buf38 del buf40 del buf43 del buf45 del buf47 del buf49 del buf5 del buf52 del buf54 del buf56 del buf58 del buf61 del buf63 del buf65 del buf67 del buf7 del buf70 del buf72 del buf74 del buf76 del buf79 del buf81 del buf83 del buf85 del buf88 del buf9 del buf90 del buf92 del buf94 del buf97 del buf99 buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0) del buf150 buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32) triton_red_fused_div_linalg_vector_norm_7[grid(4)](buf151, buf147, buf149, buf152, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor( primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151) class NetVLADNew(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=64, dim=128, normalize_input=True, vladv2=False): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors alpha : float Parameter of initialization. Larger value is harder assignment. normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. 
            vladv2 : bool
                If true, use vladv2 otherwise use vladv1
        """
        super(NetVLADNew, self).__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = 0
        self.vladv2 = vladv2
        self.normalize_input = normalize_input
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1),
            bias=vladv2)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))

    def init_params(self, clsts, traindescs):
        if self.vladv2 is False:
            clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
            dots = np.dot(clstsAssign, traindescs.T)
            dots.sort(0)
            dots = dots[::-1, :]
            self.alpha = (-np.log(0.01) /
                np.mean(dots[0, :] - dots[1, :])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            self.conv.weight = nn.Parameter(torch.from_numpy(
                self.alpha * clstsAssign).unsqueeze(2).unsqueeze(3))
            self.conv.bias = None
        else:
            knn = NearestNeighbors(n_jobs=-1)
            knn.fit(traindescs)
            del traindescs
            dsSq = np.square(knn.kneighbors(clsts, 2)[1])
            del knn
            self.alpha = (-np.log(0.01) /
                np.mean(dsSq[:, 1] - dsSq[:, 0])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            del clsts, dsSq
            self.conv.weight = nn.Parameter((2.0 * self.alpha *
                self.centroids).unsqueeze(-1).unsqueeze(-1))
            self.conv.bias = nn.Parameter(-self.alpha *
                self.centroids.norm(dim=1))

    def forward(self, input_0):
        primals_3 = self.centroids
        primals_2 = self.conv.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Rick0514/VPR_SMCN
NetVLAD
false
3,089
[ "MIT" ]
0
7a00dc8e4de0c21438474c05a4a7be18d05367fa
https://github.com/Rick0514/VPR_SMCN/tree/7a00dc8e4de0c21438474c05a4a7be18d05367fa
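A brief eager-mode usage sketch for this row (hedged: it assumes the eager NetVLAD class from this row's first code field is in scope, and the (4, 128, 30, 40) feature-map shape is purely illustrative; only the 64-cluster / 128-dim defaults and the 8192-dim, L2-normalized output come from the code above):

import torch

# Assumption: the eager NetVLAD class from this row's first code field is importable here.
vlad = NetVLAD(num_clusters=64, dim=128)
feats = torch.rand(4, 128, 30, 40)   # (N, C, H, W) local descriptors; H and W chosen only for illustration
desc = vlad(feats)
print(desc.shape)                    # torch.Size([4, 8192]) == (N, num_clusters * dim)
print(desc.norm(dim=1))              # ~1.0 per row: the flattened VLAD vector is L2-normalized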
wTransitionLinearUnit
from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
from scipy.sparse import *


class wTransitionLinearUnit(Module):

    def __init__(self, ori_dim, tar_dim):
        super(wTransitionLinearUnit, self).__init__()
        self.linear_1 = torch.nn.Linear(tar_dim, ori_dim)
        self.linear_2 = torch.nn.Linear(tar_dim, tar_dim)
        self.linear_3 = torch.nn.Linear(tar_dim, tar_dim)
        self.ori_dim = ori_dim
        self.tar_dim = tar_dim
        self.reset_parameters()

    def reset_parameters(self):
        pass

    def forward(self, last_w, z_cov):
        z_cov = (z_cov - z_cov.mean()) / z_cov.std()
        hidden = F.relu(self.linear_1(z_cov.t()))
        w_update = self.linear_2(hidden)
        update_gate = torch.sigmoid(self.linear_3(hidden))
        w_update = torch.clamp(w_update, min=-0.1, max=0.1)
        return (1 - update_gate) * last_w + w_update * update_gate


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'ori_dim': 4, 'tar_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch.nn.modules.module import Module from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mean_std_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp16 = 16.0 tmp17 = tmp3 / tmp16 tmp18 = tmp0 - tmp17 tmp19 = 15.0 tmp20 = tmp15 / tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp18 / tmp21 tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_clamp_mul_rsub_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + x2, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tmp3 * tmp4 tmp7 = -0.1 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = 0.1 tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = tmp10 * tmp1 tmp12 = tmp5 + tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mean_std_sub_0[grid(1)](primals_1, buf4, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf5 = 
empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf5) del primals_2 buf6 = buf5 del buf5 triton_poi_fused_relu_1[grid(16)](buf6, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf6, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_5 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf6, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_7 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_clamp_mul_rsub_sigmoid_2[grid(256)](buf8, primals_8, buf7, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf9, primals_8, reinterpret_tensor(buf4, (4, 4), (1, 4), 0 ), buf6, buf7, buf8, primals_6, primals_4 class wTransitionLinearUnitNew(Module): def __init__(self, ori_dim, tar_dim): super(wTransitionLinearUnitNew, self).__init__() self.linear_1 = torch.nn.Linear(tar_dim, ori_dim) self.linear_2 = torch.nn.Linear(tar_dim, tar_dim) self.linear_3 = torch.nn.Linear(tar_dim, tar_dim) self.ori_dim = ori_dim self.tar_dim = tar_dim self.reset_parameters() def reset_parameters(self): pass def forward(self, input_0, input_1): primals_1 = self.linear_1.weight primals_3 = self.linear_1.bias primals_2 = self.linear_2.weight primals_5 = self.linear_2.bias primals_4 = self.linear_3.weight primals_7 = self.linear_3.bias primals_8 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
TTomatoZhang/GHGCN
wTransitionLinearUnit
false
3,090
[ "Apache-2.0" ]
0
09a07ff9e29e5889b912ca5feff74bb9308eda55
https://github.com/TTomatoZhang/GHGCN/tree/09a07ff9e29e5889b912ca5feff74bb9308eda55
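A short shape/behaviour sketch for this row, assuming wTransitionLinearUnit from the code field above is in scope and using the shapes from get_inputs()/get_init_inputs():

import torch

# Assumption: wTransitionLinearUnit from the code field above is in scope.
unit = wTransitionLinearUnit(ori_dim=4, tar_dim=4)
last_w = torch.rand(4, 4, 4, 4)
z_cov = torch.rand(4, 4)
new_w = unit(last_w, z_cov)
# The additive part of the update is clamped to [-0.1, 0.1] and both terms are gated per element,
# so new_w broadcasts to the shape of last_w.
print(new_w.shape)   # torch.Size([4, 4, 4, 4])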
ModMBStddevLayer
import torch
import torch.nn as nn


class ModMBStddevLayer(nn.Module):
    """Modified MiniBatch Stddev Layer.

    This layer is modified from ``MiniBatchStddevLayer`` used in PGGAN. In
    StyleGAN2, the authors add a new feature, `channel_groups`, into this
    layer.
    """

    def __init__(self, group_size=4, channel_groups=1, sync_groups=None,
                 eps=1e-08):
        super(ModMBStddevLayer, self).__init__()
        self.group_size = group_size
        self.eps = eps
        self.channel_groups = channel_groups
        self.sync_groups = group_size if sync_groups is None else sync_groups

    def forward(self, x):
        assert x.shape[0] <= self.group_size or x.shape[0] % self.group_size == 0, (
            f'Batch size should be smaller than or equal to group size. '
            f'Otherwise, batch size should be divisible by the group size. '
            f'But got batch size {x.shape[0]}, group size {self.group_size}')
        assert x.shape[1] % self.channel_groups == 0, (
            f'The feature channels must be divisible by "channel_groups". '
            f'channel_groups: {self.channel_groups}, '
            f'feature channels: {x.shape[1]}')
        n, c, h, w = x.shape
        group_size = min(n, self.group_size)
        y = torch.reshape(x, (group_size, -1, self.channel_groups, c //
            self.channel_groups, h, w))
        y = torch.var(y, dim=0, unbiased=False)
        y = torch.sqrt(y + self.eps)
        y = y.mean(dim=(2, 3, 4), keepdim=True).squeeze(2)
        y = y.repeat(group_size, 1, h, w)
        return torch.cat([x, y], dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_repeat_sqrt_var_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp28, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_mean_repeat_sqrt_var_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class ModMBStddevLayerNew(nn.Module): """Modified MiniBatch Stddev Layer. This layer is modified from ``MiniBatchStddevLayer`` used in PGGAN. In StyleGAN2, the authors add a new feature, `channel_groups`, into this layer. """ def __init__(self, group_size=4, channel_groups=1, sync_groups=None, eps=1e-08): super(ModMBStddevLayerNew, self).__init__() self.group_size = group_size self.eps = eps self.channel_groups = channel_groups self.sync_groups = group_size if sync_groups is None else sync_groups def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
akimotty877/mmediting
ModMBStddevLayer
false
3,091
[ "Apache-2.0" ]
0
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
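A minimal sketch of what the layer appends, assuming ModMBStddevLayer from the code field above is in scope (shapes follow get_inputs()):

import torch

# Assumption: ModMBStddevLayer from the code field above is in scope.
layer = ModMBStddevLayer(group_size=4, channel_groups=1)
x = torch.rand(4, 4, 4, 4)
y = layer(x)
# One extra channel per channel group carries the group-wise feature stddev, broadcast spatially.
print(y.shape)                    # torch.Size([4, 5, 4, 4])
print(torch.equal(y[:, :4], x))   # True: the original features pass through unchanged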
RegModel
import torch
import torch.nn as nn


class RegModel(nn.Module):

    def __init__(self, input_size):
        super(RegModel, self).__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.relu1 = nn.ReLU()
        self.dout = nn.Dropout(0.2)
        self.fc2 = nn.Linear(50, 100)
        self.prelu = nn.PReLU(1)
        self.out = nn.Linear(100, 1)

    def forward(self, input_):
        a1 = self.fc1(input_)
        h1 = self.relu1(a1)
        dout = self.dout(h1)
        a2 = self.fc2(dout)
        h2 = self.prelu(a2)
        y = self.out(h2)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (50, 4), (4, 1)) assert_size_stride(primals_2, (50,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (100, 50), (50, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (1, 100), (100, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1, primals_2, buf6, 3200, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(primals_4, (50, 100), (1, 50), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1), torch.float32) triton_poi_fused__prelu_kernel_1[grid(6400)](buf2, primals_6, buf3, 6400, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_7, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf5) del primals_8 return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 50), (50, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 100), (100, 1), 0 ), primals_7, primals_4, buf6 class 
RegModelNew(nn.Module): def __init__(self, input_size): super(RegModelNew, self).__init__() self.fc1 = nn.Linear(input_size, 50) self.relu1 = nn.ReLU() self.dout = nn.Dropout(0.2) self.fc2 = nn.Linear(50, 100) self.prelu = nn.PReLU(1) self.out = nn.Linear(100, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.prelu.weight primals_7 = self.out.weight primals_8 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
amperie/user-models
RegModel
false
3,092
[ "Apache-2.0" ]
0
5236c50d0f20a7bac81acc5d1936a3502de2f5f3
https://github.com/amperie/user-models/tree/5236c50d0f20a7bac81acc5d1936a3502de2f5f3
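A sketch of the eager-vs-compiled check this row is meant to support, assuming RegModel and RegModelNew above are in scope and a CUDA device is available (the compiled call() allocates CUDA buffers and asserts the (4, 4, 4, 4) input shape from get_inputs()):

import torch

# Assumptions: RegModel and RegModelNew from the code fields above are in scope and CUDA is available.
ref = RegModel(input_size=4).cuda().eval()      # eval() turns the Dropout layer into a no-op
opt = RegModelNew(input_size=4).cuda().eval()
opt.load_state_dict(ref.state_dict())           # share one set of weights across both variants
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(ref(x), opt(x), atol=1e-5))   # expected: True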
ConvEncoder
import torch import torch.nn as nn import torch.autograd def pytorch_activation(name='relu'): if name == 'tanh': return nn.Tanh() if name == 'identity': return nn.Identity() if name == 'hardtanh': return nn.Hardtanh() if name == 'prelu': return nn.PReLU() if name == 'sigmoid': return nn.Sigmoid() if name == 'log_sigmoid': return nn.LogSigmoid() return nn.ReLU() class ConvEncoder(nn.Module): def __init__(self, insz, outsz, filtsz, pdrop, activation_type='relu'): super(ConvEncoder, self).__init__() self.outsz = outsz pad = filtsz // 2 self.conv = nn.Conv1d(insz, outsz, filtsz, padding=pad) self.act = pytorch_activation(activation_type) self.dropout = nn.Dropout(pdrop) def forward(self, input_bct): conv_out = self.act(self.conv(input_bct)) return self.dropout(conv_out) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'insz': 4, 'outsz': 4, 'filtsz': 4, 'pdrop': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 5), (20, 5, 1)) buf1 = reinterpret_tensor(buf0, (4, 5), (5, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 5), (5, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(20)](buf1, primals_2, buf2, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf2 def pytorch_activation(name='relu'): if name == 'tanh': return nn.Tanh() if name == 'identity': return nn.Identity() if name == 'hardtanh': return nn.Hardtanh() if name == 'prelu': return nn.PReLU() if name == 'sigmoid': return nn.Sigmoid() if name == 'log_sigmoid': return nn.LogSigmoid() return nn.ReLU() class ConvEncoderNew(nn.Module): def __init__(self, insz, outsz, filtsz, pdrop, activation_type='relu'): super(ConvEncoderNew, self).__init__() self.outsz = outsz pad = filtsz // 2 self.conv = nn.Conv1d(insz, outsz, filtsz, padding=pad) self.act = pytorch_activation(activation_type) self.dropout = nn.Dropout(pdrop) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
amyhemmeter/baseline
ConvEncoder
false
3,093
[ "Apache-2.0" ]
0
101a393398570747d14a32eb3af72664e2774c8b
https://github.com/amyhemmeter/baseline/tree/101a393398570747d14a32eb3af72664e2774c8b
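A small shape sketch, assuming ConvEncoder from the code field above is in scope; the unbatched (C, L) input mirrors get_inputs():

import torch

# Assumption: ConvEncoder from the code field above is in scope.
enc = ConvEncoder(insz=4, outsz=4, filtsz=4, pdrop=0.5, activation_type='relu')
x = torch.rand(4, 4)     # unbatched (C, L) input, as in get_inputs()
y = enc(x)
# With pad = filtsz // 2 = 2: L_out = L + 2 * pad - filtsz + 1 = 4 + 4 - 4 + 1 = 5
print(y.shape)           # torch.Size([4, 5])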
Critic
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" xs = F.relu(self.fcs1(state)) x = torch.cat((xs, action), dim=-1) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25856 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 404 x3 = xindex // 404 x2 = xindex // 1616 x4 = xindex % 1616 tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 400, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (400 * x3 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 404, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x3 + (-400 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x4 + 1632 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_cat_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 25856 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 404 x1 = xindex // 404 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 404 * (x1 % 4) + 1632 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 400 x2 = xindex // 1600 x4 = xindex % 1600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x4 + 1664 * x2), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (300, 404), (404, 1)) assert_size_stride(primals_6, (300,), (1,)) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 404), (6528, 1632, 404, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(25856)](buf0, primals_2, primals_4, buf1, 25856, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((64, 404), (404, 1), torch.float32) triton_poi_fused_cat_view_1[grid(25856)](buf1, buf2, 25856, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (404, 300), ( 1, 404), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3, primals_6, buf4, buf8, 19200, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_6 buf5 = buf3 del buf3 triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200, XBLOCK =256, num_warps=4, num_stages=1) del buf4 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf7) del primals_8 buf9 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(25600)](buf0, primals_2, buf9, 25600, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, primals_7, buf8, primals_5, buf9 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
akashkmr27089/ReinforcementLearning_Udacity_Deep_Reinforcemnt_Learning
Critic
false
3,094
[ "MIT" ]
0
b7dc13b0116898848d8d0b8a95b7af182982bd6b
https://github.com/akashkmr27089/ReinforcementLearning_Udacity_Deep_Reinforcemnt_Learning/tree/b7dc13b0116898848d8d0b8a95b7af182982bd6b
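A shape sketch of the (state, action) -> Q-value mapping, assuming Critic from the code field above is in scope and using the shapes from get_inputs():

import torch

# Assumption: Critic from the code field above is in scope.
critic = Critic(state_size=4, action_size=4, seed=4)
state = torch.rand(4, 4, 4, 4)
action = torch.rand(4, 4, 4, 4)
q = critic(state, action)
# fcs1 lifts the state to 400 features, the 4-dim action is concatenated (fc2 input = 404),
# and fc3 reduces to one Q-value per element of the leading dimensions.
print(q.shape)   # torch.Size([4, 4, 4, 1])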
AttentionPooling
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch.distributed
import torch.distributions


def compute_attention(q, k, v, dropout=None, mask=None):
    """
    :param q: Query [B, NH, NQ, EL] or [NH, 1, EL] (in this case NQ=1)
    :param k: Key [B, NH, NK, EL]
    :param v: Value [B, NH, NK, EL]
    :param mask: [B, NQ, NK]
    :param dropout:
    :return:
    """
    if q.ndim + 1 == k.ndim:
        score = torch.einsum('nij,bnkj->bnik', q, k)
    elif q.ndim == k.ndim:
        score = torch.einsum('bnij,bnkj->bnik', q, k)
    score = score / np.sqrt(q.shape[-1])
    if mask is not None:
        mask = mask[:, None]
        score = score * mask + -100000000.0 * (1 - mask)
    score = F.softmax(score, dim=-1)
    if dropout is not None:
        score = dropout(score)
    return torch.einsum('bnij,bnjk->bnik', score, v)


class MultiHeadedAttentionBase(nn.Module):

    def __init__(self, embed_dim, num_heads, latent_dim, dropout=None):
        """
        :param embed_dim: The dimension of feature in each entity.
        :param num_heads: The number of attention heads.
        :param latent_dim:
        :param dropout:
        """
        super().__init__()
        self.w_k = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim))
        self.w_v = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim))
        self.w_o = nn.Parameter(torch.empty(num_heads, latent_dim, embed_dim))
        self.dropout = nn.Dropout(dropout) if dropout else nn.Identity()

    def _reset_parameters(self):
        nn.init.xavier_normal_(self.w_k)
        nn.init.xavier_normal_(self.w_v)
        nn.init.xavier_normal_(self.w_o)
        if hasattr(self, 'q'):
            nn.init.xavier_normal_(self.q)
        if hasattr(self, 'w_q'):
            nn.init.xavier_normal_(self.w_q)


class AttentionPooling(MultiHeadedAttentionBase):

    def __init__(self, embed_dim, num_heads, latent_dim, dropout=None):
        super().__init__(embed_dim, num_heads, latent_dim, dropout)
        self.q = nn.Parameter(torch.empty(num_heads, 1, latent_dim))
        self._reset_parameters()

    def forward(self, x, mask=None):
        """
        :param x: [B, N, E] [batch size, length, embed_dim] the input tensor to the layer
        :param mask: [B, 1, N] [batch size, 1, length]
        :return: [B, E] [batch_size, embed_dim] one pooled feature per batch element
        """
        k = torch.einsum('blj,njd->bnld', x, self.w_k)
        v = torch.einsum('blj,njd->bnld', x, self.w_v)
        out = compute_attention(self.q, k, v, self.dropout, mask)
        out = torch.einsum('bnlj,njk->blk', out, self.w_o)
        out = out[:, 0]
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'embed_dim': 4, 'num_heads': 4, 'latent_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn.functional as F import torch.nn as nn import torch.distributed import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_div_sqrt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 2.0 tmp2 = tmp0 / tmp1 tmp4 = tmp3 / tmp1 tmp6 = tmp5 / tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 / tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 / tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 4 * x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x4), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x4), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1), tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch. float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0), out=buf1) buf2 = buf0 del buf0 triton_poi_fused_clone_0[grid(64)](primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf2, (1, 4, 16), (0, 16, 1), 0), out=buf3) buf4 = reinterpret_tensor(buf2, (4, 1, 16), (16, 16, 1), 0) del buf2 extern_kernels.bmm(primals_4, reinterpret_tensor(buf1, (4, 4, 16), (4, 1, 16), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 1, 4), (4, 16, 64, 1), torch.float32) triton_poi_fused__softmax_div_sqrt_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 4, 1), torch .float32) triton_poi_fused_clone_3[grid(256)](buf3, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 buf8 = reinterpret_tensor(buf5, (16, 1, 4), (4, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_clone_4[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 1), (16, 4, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(64)](primals_5, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf11 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf9, (1, 4, 16), (0, 16, 1), 0), reinterpret_tensor(buf10, (1, 16, 4), (0, 4, 1), 0), out=buf11) return reinterpret_tensor(buf11, (4, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf9, (1, 16, 4), (64, 1, 16), 0 ), reinterpret_tensor(buf10, (1, 4, 16), (64, 1, 4), 0 ), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(primals_4, (4, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 16, 4), (4, 16, 1), 0 ), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0) def compute_attention(q, k, v, dropout=None, mask=None): """ :param q: Query [B, NH, NQ, EL] or [NH, 1, EL] (in this case NQ=1) :param k: Key [B, NH, NK, EL] :param v: Value [B, NH, NK, EL] :param mask: [B, NQ, NK] :param dropout: :return: """ if q.ndim + 1 == k.ndim: score = torch.einsum('nij,bnkj->bnik', q, k) elif q.ndim == k.ndim: score = torch.einsum('bnij,bnkj->bnik', q, k) score = score / np.sqrt(q.shape[-1]) if mask is not None: mask = mask[:, None] score = score * mask + -100000000.0 * (1 - mask) score = F.softmax(score, dim=-1) if dropout is not None: score = dropout(score) return torch.einsum('bnij,bnjk->bnik', score, v) class MultiHeadedAttentionBase(nn.Module): def __init__(self, embed_dim, num_heads, latent_dim, dropout=None): """ :param embed_dim: The dimension of feature in each entity. :param num_heads: The number of attention heads. :param latent_dim: :param dropout: """ super().__init__() self.w_k = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim)) self.w_v = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim)) self.w_o = nn.Parameter(torch.empty(num_heads, latent_dim, embed_dim)) self.dropout = nn.Dropout(dropout) if dropout else nn.Identity() def _reset_parameters(self): nn.init.xavier_normal_(self.w_k) nn.init.xavier_normal_(self.w_v) nn.init.xavier_normal_(self.w_o) if hasattr(self, 'q'): nn.init.xavier_normal_(self.q) if hasattr(self, 'w_q'): nn.init.xavier_normal_(self.w_q) class AttentionPoolingNew(MultiHeadedAttentionBase): def __init__(self, embed_dim, num_heads, latent_dim, dropout=None): super().__init__(embed_dim, num_heads, latent_dim, dropout) self.q = nn.Parameter(torch.empty(num_heads, 1, latent_dim)) self._reset_parameters() def forward(self, input_0): primals_1 = self.w_k primals_2 = self.w_v primals_3 = self.w_o primals_4 = self.q primals_5 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Zed-Wu/ManiSkill-Learn
AttentionPooling
false
3,095
[ "Apache-2.0" ]
0
8056fe327752cd0863f8730672fe62bd85a0ec12
https://github.com/Zed-Wu/ManiSkill-Learn/tree/8056fe327752cd0863f8730672fe62bd85a0ec12
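A pooling-shape sketch, assuming AttentionPooling from the code field above is in scope and using the sizes from get_init_inputs():

import torch

# Assumption: AttentionPooling from the code field above is in scope.
pool = AttentionPooling(embed_dim=4, num_heads=4, latent_dim=4)
x = torch.rand(4, 4, 4)   # (B, N, E): 4 sets of 4 entities with embed_dim 4
out = pool(x)
# A learned per-head query attends over the N entities; head outputs are projected back by w_o,
# so the whole set collapses to a single embed_dim vector per batch element.
print(out.shape)          # torch.Size([4, 4]) == (B, E)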
MultiModel
import torch
import torch.nn as nn


class MultiModel(nn.Module):

    def __init__(self, input_size, output_size):
        super(MultiModel, self).__init__()
        self.layer1 = nn.Linear(input_size, 8)
        self.relu = nn.ReLU()
        self.layer2 = nn.Linear(8, output_size)
        self.out = nn.Softmax()

    def forward(self, input_):
        out = self.layer1(input_)
        out = self.relu(out)
        out = self.layer2(out)
        out = self.out(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, primals_2, buf5, 512, 
XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), buf4, primals_4, buf5 class MultiModelNew(nn.Module): def __init__(self, input_size, output_size): super(MultiModelNew, self).__init__() self.layer1 = nn.Linear(input_size, 8) self.relu = nn.ReLU() self.layer2 = nn.Linear(8, output_size) self.out = nn.Softmax() def forward(self, input_0): primals_1 = self.layer1.weight primals_2 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
amperie/user-models
MultiModel
false
3,096
[ "Apache-2.0" ]
0
5236c50d0f20a7bac81acc5d1936a3502de2f5f3
https://github.com/amperie/user-models/tree/5236c50d0f20a7bac81acc5d1936a3502de2f5f3
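A sketch of the implicit softmax dimension this row exercises, assuming MultiModel from the code field above is in scope:

import torch

# Assumption: MultiModel from the code field above is in scope.
model = MultiModel(input_size=4, output_size=4)
x = torch.rand(4, 4, 4, 4)
y = model(x)
# nn.Softmax() was constructed without a dim, so PyTorch warns and falls back to dim=1 for a
# 4-D input; the probabilities therefore sum to 1 along dimension 1 rather than the last one.
print(y.shape)                 # torch.Size([4, 4, 4, 4])
print(y.sum(dim=1)[0, 0, 0])   # tensor(1.0000)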
LandmarkHead
import torch
from torch import nn
import torch.nn


class LandmarkHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(LandmarkHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 40, kernel_size=
            (1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        return out


def get_inputs():
    return [torch.rand([4, 512, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 480 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 120 y1 = yindex // 120 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 120 * x2 + 491520 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (120, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (120,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 120, 64, 64), (491520, 1, 7680, 120)) buf2 = empty_strided_cuda((4, 120, 64, 64), (491520, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_1[grid(480, 4096)](buf1, primals_2, buf2, 480, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf1 del primals_2 return buf2, primals_1, buf0 class LandmarkHeadNew(nn.Module): def __init__(self, inchannels=512, num_anchors=3): super(LandmarkHeadNew, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 40, kernel_size= (1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ZongqingHou/Pytorch_Retinaface
LandmarkHead
false
3,097
[ "MIT" ]
0
6284b7158a0d9d3d4a2cc267a393c21863a1b938
https://github.com/ZongqingHou/Pytorch_Retinaface/tree/6284b7158a0d9d3d4a2cc267a393c21863a1b938
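A quick output-shape sketch, assuming LandmarkHead from the code field above is in scope and using the input from get_inputs():

import torch

# Assumption: LandmarkHead from the code field above is in scope.
head = LandmarkHead(inchannels=512, num_anchors=3)
fmap = torch.rand(4, 512, 64, 64)
out = head(fmap)
# The 1x1 convolution keeps the spatial grid and emits num_anchors * 40 = 120 regression
# channels per cell (40 values for each of the 3 anchors at that location).
print(out.shape)   # torch.Size([4, 120, 64, 64])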
LayerNorm
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.optim.lr_scheduler import *
from torch.nn import Parameter


class LayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=0.0001):
        super(LayerNorm, self).__init__()
        self.alpha = Parameter(torch.ones(1, 1, hidden_size))
        self.beta = Parameter(torch.zeros(1, 1, hidden_size))
        self.eps = eps

    def forward(self, x):
        """
        Args:
            x: batch * len * input_size

        Returns:
            normalized x
        """
        mu = torch.mean(x, 2, keepdim=True).expand_as(x)
        sigma = torch.std(x, 2, keepdim=True).expand_as(x)
        return (x - mu) / (sigma + self.eps) * self.alpha.expand_as(x
            ) + self.beta.expand_as(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.parameter import Parameter from torch.optim.lr_scheduler import * from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 0.0001 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x3, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(nn.Module): def __init__(self, hidden_size, eps=0.0001): super(LayerNormNew, self).__init__() self.alpha = Parameter(torch.ones(1, 1, hidden_size)) self.beta = Parameter(torch.zeros(1, 1, hidden_size)) self.eps = eps def forward(self, input_0): primals_2 = self.alpha primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
aerinkim/squad_2018
LayerNorm
false
3,098
[ "BSD-3-Clause" ]
0
4479fa7ce92d8ab2f2eeb1823991d416924d8561
https://github.com/aerinkim/squad_2018/tree/4479fa7ce92d8ab2f2eeb1823991d416924d8561
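A worked check of the normalization formula, assuming LayerNorm from the code field above is in scope; the 3-D input matches its docstring rather than get_inputs():

import torch

# Assumption: LayerNorm from the code field above is in scope.
ln = LayerNorm(hidden_size=4)
x = torch.rand(2, 3, 4)   # (batch, len, hidden), as described in the forward() docstring
y = ln(x)
# torch.std defaults to the unbiased estimator (divide by n - 1), which is exactly what the
# fused kernel above reproduces (the division by 3.0 for hidden_size == 4).
v = x[0, 0]
manual = (v - v.mean()) / (v.std() + 1e-4) * ln.alpha[0, 0] + ln.beta[0, 0]
print(torch.allclose(y[0, 0], manual))   # True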
Clamp
import torch
import torch.nn as nn
import torch.distributed
import torch.distributions


class Clamp(nn.Module):

    def __init__(self, min=-1.0, max=1.0):
        super(Clamp, self).__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        return torch.clamp(x, min=self.min, max=self.max)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.distributed import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -1.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ClampNew(nn.Module): def __init__(self, min=-1.0, max=1.0): super(ClampNew, self).__init__() self.min = min self.max = max def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Zed-Wu/ManiSkill-Learn
Clamp
false
3,099
[ "Apache-2.0" ]
0
8056fe327752cd0863f8730672fe62bd85a0ec12
https://github.com/Zed-Wu/ManiSkill-Learn/tree/8056fe327752cd0863f8730672fe62bd85a0ec12
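A one-liner behaviour sketch, assuming Clamp from the code field above is in scope:

import torch

# Assumption: Clamp from the code field above is in scope.
clamp = Clamp(min=-1.0, max=1.0)
x = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
print(clamp(x))   # tensor([-1.0000, -0.5000,  0.0000,  0.5000,  1.0000])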
BinModel
import torch
import torch.nn as nn


class BinModel(nn.Module):

    def __init__(self, input_size):
        super(BinModel, self).__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.relu1 = nn.ReLU()
        self.dout = nn.Dropout(0.2)
        self.fc2 = nn.Linear(50, 100)
        self.prelu = nn.PReLU(1)
        self.out = nn.Linear(100, 1)
        self.out_act = nn.Sigmoid()

    def forward(self, input_):
        a1 = self.fc1(input_)
        h1 = self.relu1(a1)
        dout = self.dout(h1)
        a2 = self.fc2(dout)
        h2 = self.prelu(a2)
        a3 = self.out(h2)
        y = self.out_act(a3)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (50, 4), (4, 1)) assert_size_stride(primals_2, (50,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (100, 50), (50, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (1, 100), (100, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1, primals_2, buf6, 3200, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(primals_4, (50, 100), (1, 50), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1), torch.float32) triton_poi_fused__prelu_kernel_1[grid(6400)](buf2, primals_6, buf3, 6400, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_7, (100, 1), (1, 100), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf4 triton_poi_fused_sigmoid_2[grid(64)](buf5, primals_8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 return buf5, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 50), (50, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 100), (100, 1), 0 ), buf5, primals_7, primals_4, buf6 class BinModelNew(nn.Module): def __init__(self, input_size): super(BinModelNew, self).__init__() self.fc1 = nn.Linear(input_size, 50) self.relu1 = nn.ReLU() self.dout = nn.Dropout(0.2) self.fc2 = nn.Linear(50, 100) self.prelu = nn.PReLU(1) self.out = nn.Linear(100, 1) self.out_act = nn.Sigmoid() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.prelu.weight primals_7 = self.out.weight primals_8 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
amperie/user-models
BinModel
false
3,100
[ "Apache-2.0" ]
0
5236c50d0f20a7bac81acc5d1936a3502de2f5f3
https://github.com/amperie/user-models/tree/5236c50d0f20a7bac81acc5d1936a3502de2f5f3
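The record above pairs BinModel with an Inductor trace in which the Dropout(0.2) module never appears: call() feeds the ReLU buffer straight into the fc2 addmm, so the compiled BinModelNew matches the eager model only when dropout is inactive. A minimal smoke test, assuming both class definitions from this record have been executed and a CUDA device is available (the snippet is illustrative and not part of the record):

import torch

# Compare eager vs. Inductor-traced outputs on the record's sample input shape.
# Dropout is absent from the traced graph, so keep the eager model in eval() mode.
eager = BinModel(input_size=4).cuda().eval()
traced = BinModelNew(input_size=4).cuda().eval()
traced.load_state_dict(eager.state_dict())   # identical submodules, so the keys match
x = torch.rand([4, 4, 4, 4], device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), traced(x), atol=1e-5))  # expected: True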
MultiHeadedAttention
import math import torch import torch.nn as nn import torch.nn.functional as F import torch.autograd def pytorch_linear(in_sz, out_sz, unif=0, initializer=None): l = nn.Linear(in_sz, out_sz) if unif > 0: l.weight.data.uniform_(-unif, unif) elif initializer == 'ortho': nn.init.orthogonal(l.weight) elif initializer == 'he' or initializer == 'kaiming': nn.init.kaiming_uniform(l.weight) else: nn.init.xavier_uniform_(l.weight) l.bias.data.zero_() return l def dot_product_attention(query, key, value, mask=None, dropout=None): scores = torch.matmul(query, key.transpose(-2, -1)) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def scaled_dot_product_attention(query, key, value, mask=None, dropout=None): """Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762 We apply the query to the keys to recieve our weights via softmax, which are then applied for each value, but in a series of efficient matrix operations. In the case of self-attention, the key, query and values are all low order projections of the same input. :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D :param key: a set of keys from encoder or self :param value: a set of values from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :param dropout: apply dropout operator post-attention (this is not a float) :return: A tensor that is (BxHxTxT) """ d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) weights = F.softmax(scores, dim=-1) if dropout is not None: weights = dropout(weights) return torch.matmul(weights, value), weights class MultiHeadedAttention(nn.Module): """ Multi-headed attention from https://arxiv.org/abs/1706.03762 via http://nlp.seas.harvard.edu/2018/04/03/attention.html Multi-headed attention provides multiple looks of low-order projections K, Q and V using an attention function (specifically `scaled_dot_product_attention` in the paper. This allows multiple relationships to be illuminated via attention on different positional and representational information from each head. The number of heads `h` times the low-order projection dim `d_k` is equal to `d_model` (which is asserted upfront). This means that each weight matrix can be simply represented as a linear transformation from `d_model` to `d_model`, and partitioned into heads after the fact. Finally, an output projection is applied which brings the output space back to `d_model`, in preparation for the sub-sequent `FFN` sub-layer. There are 3 uses of multi-head attention in the Transformer. For encoder-decoder layers, the queries come from the previous decoder layer, and the memory keys come from the encoder. For encoder layers, the K, Q and V all come from the output of the previous layer of the encoder. 
And for self-attention in the decoder, K, Q and V all come from the decoder, but here it is masked to prevent using future values """ def __init__(self, h, d_model, dropout=0.1, scale=False): """Constructor for multi-headed attention :param h: The number of heads :param d_model: The model hidden size :param dropout (``float``): The amount of dropout to use :param attn_fn: A function to apply attention, defaults to SDP """ super(MultiHeadedAttention, self).__init__() assert d_model % h == 0 self.d_k = d_model // h self.h = h self.w_Q = pytorch_linear(d_model, d_model) self.w_K = pytorch_linear(d_model, d_model) self.w_V = pytorch_linear(d_model, d_model) self.w_O = pytorch_linear(d_model, d_model) self.attn_fn = (scaled_dot_product_attention if scale else dot_product_attention) self.attn = None self.dropout = nn.Dropout(dropout) def forward(self, query, key, value, mask=None): """Low-order projections of query, key and value into multiple heads, then attention application and dropout :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D :param key: a set of keys from encoder or self :param value: a set of values from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :return: Multi-head attention output, result of attention application to sequence (B, T, d_model) """ batchsz = query.size(0) query = self.w_Q(query).view(batchsz, -1, self.h, self.d_k).transpose( 1, 2) key = self.w_K(key).view(batchsz, -1, self.h, self.d_k).transpose(1, 2) value = self.w_V(value).view(batchsz, -1, self.h, self.d_k).transpose( 1, 2) x, self.attn = self.attn_fn(query, key, value, mask=mask, dropout= self.dropout) x = x.transpose(1, 2).contiguous().view(batchsz, -1, self.h * self.d_k) return self.w_O(x) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'h': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn import torch.nn.functional as F import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_per_fused__softmax_1[grid(256)](buf5, buf8, 256, 16, XBLOCK= 8, num_warps=2, num_stages=1) del buf5 buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_11 return reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 
1), 0 ), buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0) def pytorch_linear(in_sz, out_sz, unif=0, initializer=None): l = nn.Linear(in_sz, out_sz) if unif > 0: l.weight.data.uniform_(-unif, unif) elif initializer == 'ortho': nn.init.orthogonal(l.weight) elif initializer == 'he' or initializer == 'kaiming': nn.init.kaiming_uniform(l.weight) else: nn.init.xavier_uniform_(l.weight) l.bias.data.zero_() return l def dot_product_attention(query, key, value, mask=None, dropout=None): scores = torch.matmul(query, key.transpose(-2, -1)) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def scaled_dot_product_attention(query, key, value, mask=None, dropout=None): """Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762 We apply the query to the keys to recieve our weights via softmax, which are then applied for each value, but in a series of efficient matrix operations. In the case of self-attention, the key, query and values are all low order projections of the same input. :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D :param key: a set of keys from encoder or self :param value: a set of values from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :param dropout: apply dropout operator post-attention (this is not a float) :return: A tensor that is (BxHxTxT) """ d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) weights = F.softmax(scores, dim=-1) if dropout is not None: weights = dropout(weights) return torch.matmul(weights, value), weights class MultiHeadedAttentionNew(nn.Module): """ Multi-headed attention from https://arxiv.org/abs/1706.03762 via http://nlp.seas.harvard.edu/2018/04/03/attention.html Multi-headed attention provides multiple looks of low-order projections K, Q and V using an attention function (specifically `scaled_dot_product_attention` in the paper. This allows multiple relationships to be illuminated via attention on different positional and representational information from each head. The number of heads `h` times the low-order projection dim `d_k` is equal to `d_model` (which is asserted upfront). This means that each weight matrix can be simply represented as a linear transformation from `d_model` to `d_model`, and partitioned into heads after the fact. Finally, an output projection is applied which brings the output space back to `d_model`, in preparation for the sub-sequent `FFN` sub-layer. There are 3 uses of multi-head attention in the Transformer. For encoder-decoder layers, the queries come from the previous decoder layer, and the memory keys come from the encoder. For encoder layers, the K, Q and V all come from the output of the previous layer of the encoder. 
And for self-attention in the decoder, K, Q and V all come from the decoder, but here it is masked to prevent using future values """ def __init__(self, h, d_model, dropout=0.1, scale=False): """Constructor for multi-headed attention :param h: The number of heads :param d_model: The model hidden size :param dropout (``float``): The amount of dropout to use :param attn_fn: A function to apply attention, defaults to SDP """ super(MultiHeadedAttentionNew, self).__init__() assert d_model % h == 0 self.d_k = d_model // h self.h = h self.w_Q = pytorch_linear(d_model, d_model) self.w_K = pytorch_linear(d_model, d_model) self.w_V = pytorch_linear(d_model, d_model) self.w_O = pytorch_linear(d_model, d_model) self.attn_fn = (scaled_dot_product_attention if scale else dot_product_attention) self.attn = None self.dropout = nn.Dropout(dropout) def forward(self, input_0, input_1, input_2): primals_2 = self.w_Q.weight primals_3 = self.w_Q.bias primals_4 = self.w_K.weight primals_5 = self.w_K.bias primals_7 = self.w_V.weight primals_8 = self.w_V.bias primals_10 = self.w_O.weight primals_11 = self.w_O.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
amyhemmeter/baseline
MultiHeadedAttention
false
3,101
[ "Apache-2.0" ]
0
101a393398570747d14a32eb3af72664e2774c8b
https://github.com/amyhemmeter/baseline/tree/101a393398570747d14a32eb3af72664e2774c8b
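Worth noting about the record above: get_init_inputs() builds the module with the default scale=False, so attn_fn is dot_product_attention and the traced kernels apply softmax directly to the Q @ K^T bmm, with no 1/sqrt(d_k) factor. A small eager-mode sketch of self-attention usage with the record's sizes, assuming the MultiHeadedAttention class from this record is in scope (illustrative only):

import torch

# h=4, d_model=4 gives d_k = 1 per head; Q = K = V for self-attention.
mha = MultiHeadedAttention(h=4, d_model=4).eval()   # eval() disables the 0.1 dropout
x = torch.rand([4, 4, 4, 4])                        # the record's sample tensor
out = mha(x, x, x)
print(out.shape)                                    # torch.Size([4, 16, 4])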
ScaleToModel
import torch import torch.nn as nn import torch.cuda from torch import linalg as linalg class ScaleToModel(nn.Module): def __init__(self, model_value_range, test_value_range): super(ScaleToModel, self).__init__() self.m_min, self.m_max = model_value_range self.t_min, self.t_max = test_value_range def forward(self, img: 'torch.Tensor'): """ input: [test_val_min, test_val_max] """ img = (img - self.t_min) / (self.t_max - self.t_min) img = img * (self.m_max - self.m_min) + self.m_min return img def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'model_value_range': [4, 4], 'test_value_range': [4, 4]}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.cuda from torch import linalg as linalg assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = float('inf') tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = tmp4 * tmp5 tmp7 = tmp6 + tmp1 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class ScaleToModelNew(nn.Module): def __init__(self, model_value_range, test_value_range): super(ScaleToModelNew, self).__init__() self.m_min, self.m_max = model_value_range self.t_min, self.t_max = test_value_range def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
angelvillar96/vp-suite
ScaleToModel
false
3,102
[ "MIT" ]
0
3e7c7d852862bad09a771d754fc56a71abf0a25f
https://github.com/angelvillar96/vp-suite/tree/3e7c7d852862bad09a771d754fc56a71abf0a25f
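A note on the record above: the sample init values model_value_range=[4, 4] and test_value_range=[4, 4] make (t_max - t_min) zero, which is why the constant-folded kernel contains float('inf') and the traced output is NaN for any input. With distinct bounds the module is an ordinary affine rescale; a hypothetical eager-mode example (not part of the record):

import torch

# Map images from the test range [0, 1] into a model range [-1, 1].
scaler = ScaleToModel(model_value_range=[-1.0, 1.0], test_value_range=[0.0, 1.0])
img = torch.rand(4, 3, 8, 8)
out = scaler(img)
print(out.min().item() >= -1.0 and out.max().item() <= 1.0)  # True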
Attention
import torch from torch import nn import torch.nn.functional as F class Attention(nn.Module): """Implements additive attention and return the attention vector used to weight the values. Additive attention consists in concatenating key and query and then passing them trough a linear layer.""" def __init__(self, enc_hid_dim, dec_hid_dim): super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn = nn.Linear(enc_hid_dim + dec_hid_dim, dec_hid_dim) self.v = nn.Parameter(torch.rand(dec_hid_dim)) def forward(self, key, queries): batch_size = queries.shape[0] src_len = queries.shape[1] key = key.unsqueeze(1).repeat(1, src_len, 1) energy = torch.tanh(self.attn(torch.cat((key, queries), dim=2))) energy = energy.permute(0, 2, 1) v = self.v.repeat(batch_size, 1).unsqueeze(1) attention = torch.bmm(v, energy).squeeze(1) return F.softmax(attention, dim=1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'enc_hid_dim': 4, 'dec_hid_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x3 = xindex // 8 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, 
primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_5, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused__softmax_4[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return buf6, reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf2, buf6, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0) class AttentionNew(nn.Module): """Implements additive attention and return the attention vector used to weight the values. Additive attention consists in concatenating key and query and then passing them trough a linear layer.""" def __init__(self, enc_hid_dim, dec_hid_dim): super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn = nn.Linear(enc_hid_dim + dec_hid_dim, dec_hid_dim) self.v = nn.Parameter(torch.rand(dec_hid_dim)) def forward(self, input_0, input_1): primals_4 = self.v primals_3 = self.attn.weight primals_5 = self.attn.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
alpgokcek/turkish-qg-model
Attention
false
3,103
[ "MIT" ]
0
e90050d869958325aeaf639a2b1ff5eb2856e318
https://github.com/alpgokcek/turkish-qg-model/tree/e90050d869958325aeaf639a2b1ff5eb2856e318
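The additive-attention record above returns a distribution over source positions. A short shape walk-through in eager mode, assuming the Attention class from this record is in scope (illustrative only):

import torch

# key: one decoder state per batch element; queries: src_len encoder states.
attn = Attention(enc_hid_dim=4, dec_hid_dim=4)
key = torch.rand(4, 4)         # [batch, dec_hid_dim]
queries = torch.rand(4, 4, 4)  # [batch, src_len, enc_hid_dim]
weights = attn(key, queries)
print(weights.shape)           # torch.Size([4, 4]) -- one weight per source position
print(weights.sum(dim=1))      # each row sums to ~1.0 after the softmax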
BayesLinear
from torch.nn import Module import math import torch from torch.nn import Parameter import torch.nn.functional as F class BayesLinear(Module): """ Applies Bayesian Linear Arguments: prior_mu (Float): mean of prior normal distribution. prior_sigma (Float): sigma of prior normal distribution. .. note:: other arguments are following linear of pytorch 1.2.0. https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py """ __constants__ = ['prior_mu', 'prior_sigma', 'bias', 'in_features', 'out_features'] def __init__(self, prior_mu, prior_sigma, in_features, out_features, bias=True): super(BayesLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.prior_mu = prior_mu self.prior_sigma = prior_sigma self.prior_log_sigma = math.log(prior_sigma) self.weight_mu = Parameter(torch.Tensor(out_features, in_features)) self.weight_log_sigma = Parameter(torch.Tensor(out_features, in_features)) self.register_buffer('weight_eps', None) if bias is None or bias is False: self.bias = False else: self.bias = True if self.bias: self.bias_mu = Parameter(torch.Tensor(out_features)) self.bias_log_sigma = Parameter(torch.Tensor(out_features)) self.register_buffer('bias_eps', None) else: self.register_parameter('bias_mu', None) self.register_parameter('bias_log_sigma', None) self.register_buffer('bias_eps', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight_mu.size(1)) self.weight_mu.data.uniform_(-stdv, stdv) self.weight_log_sigma.data.fill_(self.prior_log_sigma) if self.bias: self.bias_mu.data.uniform_(-stdv, stdv) self.bias_log_sigma.data.fill_(self.prior_log_sigma) def freeze(self): self.weight_eps = torch.randn_like(self.weight_log_sigma) if self.bias: self.bias_eps = torch.randn_like(self.bias_log_sigma) def unfreeze(self): self.weight_eps = None if self.bias: self.bias_eps = None def forward(self, input): """ Overriden. """ if self.weight_eps is None: weight = self.weight_mu + torch.exp(self.weight_log_sigma ) * torch.randn_like(self.weight_log_sigma) else: weight = self.weight_mu + torch.exp(self.weight_log_sigma ) * self.weight_eps if self.bias: if self.bias_eps is None: bias = self.bias_mu + torch.exp(self.bias_log_sigma ) * torch.randn_like(self.bias_log_sigma) else: bias = self.bias_mu + torch.exp(self.bias_log_sigma ) * self.bias_eps else: bias = None return F.linear(input, weight, bias) def extra_repr(self): """ Overriden. """ return ( 'prior_mu={}, prior_sigma={}, in_features={}, out_features={}, bias={}' .format(self.prior_mu, self.prior_sigma, self.in_features, self .out_features, self.bias is not None)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'prior_mu': 4, 'prior_sigma': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import math from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl_math.exp(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl_math.exp(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = torch.ops.aten.randn.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_0[grid(16)](primals_1, primals_2, buf1, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_exp_mul_1[grid(4)](primals_3, primals_4, buf3, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf5, reinterpret_tensor(primals_5, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del buf4 del buf5 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, primals_4, buf1, buf3, reinterpret_tensor(primals_5, (64, 4), (4, 1), 0) class BayesLinearNew(Module): """ Applies Bayesian Linear Arguments: prior_mu (Float): mean of prior normal distribution. prior_sigma (Float): sigma of prior normal distribution. .. note:: other arguments are following linear of pytorch 1.2.0. 
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py """ __constants__ = ['prior_mu', 'prior_sigma', 'bias', 'in_features', 'out_features'] def __init__(self, prior_mu, prior_sigma, in_features, out_features, bias=True): super(BayesLinearNew, self).__init__() self.in_features = in_features self.out_features = out_features self.prior_mu = prior_mu self.prior_sigma = prior_sigma self.prior_log_sigma = math.log(prior_sigma) self.weight_mu = Parameter(torch.Tensor(out_features, in_features)) self.weight_log_sigma = Parameter(torch.Tensor(out_features, in_features)) self.register_buffer('weight_eps', None) if bias is None or bias is False: self.bias = False else: self.bias = True if self.bias: self.bias_mu = Parameter(torch.Tensor(out_features)) self.bias_log_sigma = Parameter(torch.Tensor(out_features)) self.register_buffer('bias_eps', None) else: self.register_parameter('bias_mu', None) self.register_parameter('bias_log_sigma', None) self.register_buffer('bias_eps', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight_mu.size(1)) self.weight_mu.data.uniform_(-stdv, stdv) self.weight_log_sigma.data.fill_(self.prior_log_sigma) if self.bias: self.bias_mu.data.uniform_(-stdv, stdv) self.bias_log_sigma.data.fill_(self.prior_log_sigma) def freeze(self): self.weight_eps = torch.randn_like(self.weight_log_sigma) if self.bias: self.bias_eps = torch.randn_like(self.bias_log_sigma) def unfreeze(self): self.weight_eps = None if self.bias: self.bias_eps = None def extra_repr(self): """ Overriden. """ return ( 'prior_mu={}, prior_sigma={}, in_features={}, out_features={}, bias={}' .format(self.prior_mu, self.prior_sigma, self.in_features, self .out_features, self.bias is not None)) def forward(self, input_0): primals_1 = self.weight_mu primals_2 = self.weight_log_sigma primals_3 = self.bias_mu primals_4 = self.bias_log_sigma primals_5 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
anaplasia29/Bayesian-Neural-Network
BayesLinear
false
3,104
[ "MIT" ]
0
d98df8039e52cd2505dc8a94ed3cd474c2056d9a
https://github.com/anaplasia29/Bayesian-Neural-Network/tree/d98df8039e52cd2505dc8a94ed3cd474c2056d9a
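In the record above, the traced call() always draws fresh noise (torch.ops.aten.randn), i.e. it corresponds to the unfrozen path of BayesLinear. A hypothetical eager-mode sketch of the freeze()/unfreeze() behaviour, assuming the record's BayesLinear class is in scope:

import torch

layer = BayesLinear(prior_mu=0.0, prior_sigma=0.1, in_features=4, out_features=4)
x = torch.rand(4, 4)
a, b = layer(x), layer(x)
print(torch.allclose(a, b))   # False in general: new eps is sampled on every call
layer.freeze()                # pin weight_eps / bias_eps
c, d = layer(x), layer(x)
print(torch.allclose(c, d))   # True: outputs are deterministic while frozen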
Normalize
import torch import torch.nn as nn class Normalize(nn.Module): def forward(self, x): return (x - 0.1307) / 0.3081 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.1307 tmp2 = tmp0 - tmp1 tmp3 = 3.245699448231094 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
anianruoss/RIAI
Normalize
false
3,105
[ "MIT" ]
0
2ac4ddcfb73c9678b1c4fe94fdaae82baceac4ea
https://github.com/anianruoss/RIAI/tree/2ac4ddcfb73c9678b1c4fe94fdaae82baceac4ea
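One detail of the record above: the eager module computes (x - 0.1307) / 0.3081 (the usual MNIST mean/std), while the fused kernel folds the division into a multiplication by the precomputed reciprocal. A quick numeric check (illustrative, not part of the record):

import torch

print(abs(1.0 / 0.3081 - 3.245699448231094) < 1e-12)  # True: the kernel literal is 1/0.3081
x = torch.rand(4, 4, 4, 4)
print(torch.allclose((x - 0.1307) / 0.3081, (x - 0.1307) * (1.0 / 0.3081)))  # True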
MultiHeadSelfAttention
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn import torch.distributed import torch.distributions def compute_attention(q, k, v, dropout=None, mask=None): """ :param q: Query [B, NH, NQ, EL] or [NH, 1, EL] (in this case NQ=1) :param k: Key [B, NH, NK, EL] :param v: Value [B, NH, NK, EL] :param mask: [B, NQ, NK] :param dropout: :return: """ if q.ndim + 1 == k.ndim: score = torch.einsum('nij,bnkj->bnik', q, k) elif q.ndim == k.ndim: score = torch.einsum('bnij,bnkj->bnik', q, k) score = score / np.sqrt(q.shape[-1]) if mask is not None: mask = mask[:, None] score = score * mask + -100000000.0 * (1 - mask) score = F.softmax(score, dim=-1) if dropout is not None: score = dropout(score) return torch.einsum('bnij,bnjk->bnik', score, v) class MultiHeadedAttentionBase(nn.Module): def __init__(self, embed_dim, num_heads, latent_dim, dropout=None): """ :param embed_dim: The dimension of feature in each entity. :param num_heads: The number of attention heads. :param latent_dim: :param dropout: """ super().__init__() self.w_k = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim)) self.w_v = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim)) self.w_o = nn.Parameter(torch.empty(num_heads, latent_dim, embed_dim)) self.dropout = nn.Dropout(dropout) if dropout else nn.Identity() def _reset_parameters(self): nn.init.xavier_normal_(self.w_k) nn.init.xavier_normal_(self.w_v) nn.init.xavier_normal_(self.w_o) if hasattr(self, 'q'): nn.init.xavier_normal_(self.q) if hasattr(self, 'w_q'): nn.init.xavier_normal_(self.w_q) class MultiHeadSelfAttention(MultiHeadedAttentionBase): def __init__(self, embed_dim, num_heads, latent_dim, dropout=None): super().__init__(embed_dim, num_heads, latent_dim, dropout) self.w_q = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim)) self._reset_parameters() def forward(self, x, mask=None): """ :param x: [B, N, E] [batch size, length, embed_dim] the input to the layer, a tensor of shape :param mask: [B, N, N] [batch size, length, length] :return: [B, N, E] [batch_size, length, embed_dim] Features after self attention """ q = torch.einsum('blj,njd->bnld', x, self.w_q) k = torch.einsum('blj,njd->bnld', x, self.w_k) v = torch.einsum('blj,njd->bnld', x, self.w_v) out = compute_attention(q, k, v, self.dropout, mask) out = torch.einsum('bnlj,njk->blk', out, self.w_o) out = self.dropout(out) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4, 'num_heads': 4, 'latent_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn.functional as F import torch.nn as nn import torch.distributed import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_sqrt_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 2.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6.to(tl.float64) tmp21 = tmp20 * tmp1 tmp22 = tmp21.to(tl.float32) tmp23 = tmp19 / tmp22 tmp24 = tl_math.exp(tmp23) tl.store(out_ptr0 + x2, tmp24, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0), out=buf1) buf2 = buf0 del buf0 triton_poi_fused_clone_0[grid(64)](primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf2, (1, 4, 16), (0, 16, 1), 0), out=buf3) buf4 = buf2 del buf2 triton_poi_fused_clone_0[grid(64)](primals_4, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf5 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf4, (1, 4, 16), (0, 16, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_1[grid(256)](buf1, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_2[grid(64, 4)](buf3, buf7, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_sqrt_3[grid(256)](buf8, buf9, 256, XBLOCK =256, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__softmax_4[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf9 triton_poi_fused_clone_1[grid(256)](buf5, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), out=buf12 ) buf13 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(64, 4)](buf12, buf13, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del buf12 buf14 = buf4 del buf4 triton_poi_fused_clone_0[grid(64)](primals_5, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf15 = empty_strided_cuda((1, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf13, (1, 16, 16), (0, 16, 1 ), 0), reinterpret_tensor(buf14, (1, 16, 4), (0, 4, 1), 0), out =buf15) return reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0 ), buf10, reinterpret_tensor(buf13, (1, 16, 16), (256, 1, 16), 0 ), reinterpret_tensor(buf14, (1, 4, 16), (64, 1, 4), 0 ), reinterpret_tensor(buf11, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0) def compute_attention(q, k, v, dropout=None, mask=None): """ :param q: Query [B, NH, NQ, EL] or [NH, 1, EL] (in this case NQ=1) :param k: Key [B, NH, NK, EL] :param v: Value [B, NH, NK, EL] :param mask: [B, NQ, NK] :param dropout: :return: """ if q.ndim + 1 == k.ndim: score = torch.einsum('nij,bnkj->bnik', q, k) elif 
q.ndim == k.ndim: score = torch.einsum('bnij,bnkj->bnik', q, k) score = score / np.sqrt(q.shape[-1]) if mask is not None: mask = mask[:, None] score = score * mask + -100000000.0 * (1 - mask) score = F.softmax(score, dim=-1) if dropout is not None: score = dropout(score) return torch.einsum('bnij,bnjk->bnik', score, v) class MultiHeadedAttentionBase(nn.Module): def __init__(self, embed_dim, num_heads, latent_dim, dropout=None): """ :param embed_dim: The dimension of feature in each entity. :param num_heads: The number of attention heads. :param latent_dim: :param dropout: """ super().__init__() self.w_k = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim)) self.w_v = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim)) self.w_o = nn.Parameter(torch.empty(num_heads, latent_dim, embed_dim)) self.dropout = nn.Dropout(dropout) if dropout else nn.Identity() def _reset_parameters(self): nn.init.xavier_normal_(self.w_k) nn.init.xavier_normal_(self.w_v) nn.init.xavier_normal_(self.w_o) if hasattr(self, 'q'): nn.init.xavier_normal_(self.q) if hasattr(self, 'w_q'): nn.init.xavier_normal_(self.w_q) class MultiHeadSelfAttentionNew(MultiHeadedAttentionBase): def __init__(self, embed_dim, num_heads, latent_dim, dropout=None): super().__init__(embed_dim, num_heads, latent_dim, dropout) self.w_q = nn.Parameter(torch.empty(num_heads, embed_dim, latent_dim)) self._reset_parameters() def forward(self, input_0): primals_1 = self.w_k primals_2 = self.w_v primals_3 = self.w_o primals_4 = self.w_q primals_5 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Zed-Wu/ManiSkill-Learn
MultiHeadSelfAttention
false
3,106
[ "Apache-2.0" ]
0
8056fe327752cd0863f8730672fe62bd85a0ec12
https://github.com/Zed-Wu/ManiSkill-Learn/tree/8056fe327752cd0863f8730672fe62bd85a0ec12
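The traced call() in the record above covers the default path of MultiHeadSelfAttention (mask=None, and dropout=None makes self.dropout an nn.Identity). In eager mode a [B, N, N] mask can still be supplied; a hypothetical example with a causal mask, assuming the record's classes are in scope:

import torch

attn = MultiHeadSelfAttention(embed_dim=4, num_heads=4, latent_dim=4)
x = torch.rand(4, 4, 4)                               # [B, N, E], the record's sample input
mask = torch.tril(torch.ones(4, 4)).expand(4, 4, 4)   # lower-triangular, broadcast over batch
out = attn(x, mask=mask)
print(out.shape)                                      # torch.Size([4, 4, 4])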
KLLoss
import torch import torch.nn as nn import torch.cuda from torch import linalg as linalg class BaseMeasure(nn.Module): """ """ NAME: 'str' = NotImplemented REFERENCE: 'str' = None BIGGER_IS_BETTER = False OPT_VALUE = 0.0 def __init__(self, device): """ Args: device (): """ super(BaseMeasure, self).__init__() self.device = device self def forward(self, pred: 'torch.Tensor', target: 'torch.Tensor'): """ Args: pred (): target (): Returns: """ if pred.ndim != 5 or target.ndim != 5: raise ValueError(f'{self.NAME} expects 5-D inputs!') value = self.criterion(pred, target) return value.sum(dim=(4, 3, 2)).mean(dim=1).mean(dim=0) def reshape_clamp(self, pred: 'torch.Tensor', target: 'torch.Tensor'): """ Args: pred (): target (): Returns: """ if pred.ndim != 5 or target.ndim != 5: raise ValueError(f'{self.NAME} expects 5-D inputs!') pred = pred.reshape(-1, *pred.shape[2:]) pred = ((pred + 1) / 2).clamp_(min=0.0, max=1.0) target = target.reshape(-1, *target.shape[2:]) target = ((target + 1) / 2).clamp_(min=0.0, max=1.0) return pred, target @classmethod def to_display(cls, x): """ Args: x (): Returns: """ return x class KLLoss(BaseMeasure): """ KL-Divergence loss function """ NAME = 'KL-Divergence (KL)' def __init__(self, device): super(KLLoss, self).__init__(device) def criterion(self, mu1, logvar1, mu2, logvar2): """ Computing the KL-Divergence between two Gaussian distributions """ sigma1 = logvar1.mul(0.5).exp() sigma2 = logvar2.mul(0.5).exp() kld = torch.log(sigma2 / sigma1) + (torch.exp(logvar1) + (mu1 - mu2 ) ** 2) / (2 * torch.exp(logvar2)) - 1 / 2 return kld def forward(self, mu1, logvar1, mu2, logvar2): """ Computing the KL-Divergence between two Gaussian distributions """ value = self.criterion(mu1, logvar1, mu2, logvar2) return value.sum(dim=-1).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.cuda from torch import linalg as linalg assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp41 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp51 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp61 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp4 * tmp1 tmp6 = tl_math.exp(tmp5) tmp7 = tmp3 / tmp6 tmp8 = tl_math.log(tmp7) tmp9 = tl_math.exp(tmp4) tmp12 = tmp10 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp9 + tmp13 tmp15 = tl_math.exp(tmp0) tmp16 = 2.0 tmp17 = tmp15 * tmp16 tmp18 = tmp14 / tmp17 tmp19 = tmp8 + tmp18 tmp20 = tmp19 - tmp1 tmp22 = tmp21 * tmp1 tmp23 = tl_math.exp(tmp22) tmp25 = tmp24 * tmp1 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 / tmp26 tmp28 = tl_math.log(tmp27) tmp29 = tl_math.exp(tmp24) tmp32 = tmp30 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp29 + tmp33 tmp35 = tl_math.exp(tmp21) tmp36 = tmp35 * tmp16 tmp37 = tmp34 / tmp36 tmp38 = tmp28 + tmp37 tmp39 = tmp38 - tmp1 tmp40 = tmp20 + tmp39 tmp42 = tmp41 * tmp1 tmp43 = tl_math.exp(tmp42) tmp45 = tmp44 * tmp1 tmp46 = tl_math.exp(tmp45) tmp47 = tmp43 / tmp46 tmp48 = tl_math.log(tmp47) tmp49 = tl_math.exp(tmp44) tmp52 = tmp50 - tmp51 tmp53 = tmp52 * tmp52 tmp54 = tmp49 + tmp53 tmp55 = tl_math.exp(tmp41) tmp56 = tmp55 * tmp16 tmp57 = tmp54 / tmp56 tmp58 = tmp48 + tmp57 tmp59 = tmp58 - tmp1 tmp60 = tmp40 + tmp59 tmp62 = tmp61 * tmp1 tmp63 = tl_math.exp(tmp62) tmp65 = tmp64 * tmp1 tmp66 = tl_math.exp(tmp65) tmp67 = tmp63 / tmp66 tmp68 = tl_math.log(tmp67) tmp69 = tl_math.exp(tmp64) tmp72 = tmp70 - tmp71 tmp73 = tmp72 * tmp72 tmp74 = tmp69 + tmp73 tmp75 = tl_math.exp(tmp61) tmp76 = tmp75 * tmp16 tmp77 = tmp74 / tmp76 tmp78 = tmp68 + tmp77 tmp79 = tmp78 - tmp1 tmp80 = tmp60 + tmp79 tmp81 = tl.broadcast_to(tmp80, [XBLOCK, 
RBLOCK]) tmp83 = tl.sum(tmp81, 1)[:, None] tmp84 = 64.0 tmp85 = tmp83 / tmp84 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp85, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, arg3_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class BaseMeasure(nn.Module): """ """ NAME: 'str' = NotImplemented REFERENCE: 'str' = None BIGGER_IS_BETTER = False OPT_VALUE = 0.0 def __init__(self, device): """ Args: device (): """ super(BaseMeasure, self).__init__() self.device = device self def forward(self, pred: 'torch.Tensor', target: 'torch.Tensor'): """ Args: pred (): target (): Returns: """ if pred.ndim != 5 or target.ndim != 5: raise ValueError(f'{self.NAME} expects 5-D inputs!') value = self.criterion(pred, target) return value.sum(dim=(4, 3, 2)).mean(dim=1).mean(dim=0) def reshape_clamp(self, pred: 'torch.Tensor', target: 'torch.Tensor'): """ Args: pred (): target (): Returns: """ if pred.ndim != 5 or target.ndim != 5: raise ValueError(f'{self.NAME} expects 5-D inputs!') pred = pred.reshape(-1, *pred.shape[2:]) pred = ((pred + 1) / 2).clamp_(min=0.0, max=1.0) target = target.reshape(-1, *target.shape[2:]) target = ((target + 1) / 2).clamp_(min=0.0, max=1.0) return pred, target @classmethod def to_display(cls, x): """ Args: x (): Returns: """ return x class KLLossNew(BaseMeasure): """ KL-Divergence loss function """ NAME = 'KL-Divergence (KL)' def __init__(self, device): super(KLLossNew, self).__init__(device) def criterion(self, mu1, logvar1, mu2, logvar2): """ Computing the KL-Divergence between two Gaussian distributions """ sigma1 = logvar1.mul(0.5).exp() sigma2 = logvar2.mul(0.5).exp() kld = torch.log(sigma2 / sigma1) + (torch.exp(logvar1) + (mu1 - mu2 ) ** 2) / (2 * torch.exp(logvar2)) - 1 / 2 return kld def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
angelvillar96/vp-suite
KLLoss
false
3,107
[ "MIT" ]
0
3e7c7d852862bad09a771d754fc56a71abf0a25f
https://github.com/angelvillar96/vp-suite/tree/3e7c7d852862bad09a771d754fc56a71abf0a25f
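The criterion in the record above is the closed-form KL divergence between two diagonal Gaussians, KL(N(mu1, exp(logvar1)) || N(mu2, exp(logvar2))), summed over the last dimension and then averaged. A small sanity check, assuming the record's KLLoss class is in scope (illustrative only):

import torch

loss = KLLoss(device=0)
mu = torch.rand(4, 4, 4, 4)
logvar = torch.rand(4, 4, 4, 4)
print(loss(mu, logvar, mu, logvar).item())            # ~0.0 when both Gaussians coincide
print(loss(mu, logvar, mu + 1.0, logvar).item() > 0)  # True: KL grows with the mean gap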
Net
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc = nn.Linear(28 * 28, 200) self.fc2 = nn.Linear(200, 10) def forward(self, x): x = x.view((-1, 28 * 28)) x = F.relu(self.fc(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Fused bias-add + ReLU, applied in-place to the first linear layer's output.
    xnumel = 800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 200
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    # fc: matmul followed by the fused bias+ReLU kernel; fc2: a single addmm.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (200, 784), (784, 1))
    assert_size_stride(primals_3, (200,), (1,))
    assert_size_stride(primals_4, (10, 200), (200, 1))
    assert_size_stride(primals_5, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
            200), (1, 784), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(800)](buf1, primals_3, 800, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
            (200, 10), (1, 200), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    return buf2, primals_1, buf1, primals_4


class NetNew(nn.Module):

    def __init__(self):
        super(NetNew, self).__init__()
        self.fc = nn.Linear(28 * 28, 200)
        self.fc2 = nn.Linear(200, 10)

    def forward(self, input_0):
        primals_2 = self.fc.weight
        primals_3 = self.fc.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
anianruoss/RIAI
Net
false
3,108
[ "MIT" ]
0
2ac4ddcfb73c9678b1c4fe94fdaae82baceac4ea
https://github.com/anianruoss/RIAI/tree/2ac4ddcfb73c9678b1c4fe94fdaae82baceac4ea
LocationLayer
import torch
import torch.utils.data
import torch
import torch.nn as nn


class Linear(nn.Module):

    def __init__(self, in_features, out_features, bias=True, init_gain='linear'
        ):
        super(Linear, self).__init__()
        self.linear_layer = nn.Linear(in_features, out_features, bias=bias)
        self._init_w(init_gain)

    def _init_w(self, init_gain):
        nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
            calculate_gain(init_gain))

    def forward(self, x):
        return self.linear_layer(x)


class LocationLayer(nn.Module):

    def __init__(self, attention_dim, attention_n_filters=32,
        attention_kernel_size=31):
        super(LocationLayer, self).__init__()
        self.location_conv = nn.Conv1d(in_channels=2, out_channels=
            attention_n_filters, kernel_size=attention_kernel_size, stride=
            1, padding=(attention_kernel_size - 1) // 2, bias=False)
        self.location_dense = Linear(attention_n_filters, attention_dim,
            bias=False, init_gain='tanh')

    def forward(self, attention_cat):
        processed_attention = self.location_conv(attention_cat)
        processed_attention = self.location_dense(processed_attention.
            transpose(1, 2))
        return processed_attention


def get_inputs():
    return [torch.rand([4, 2, 64])]


def get_init_inputs():
    return [[], {'attention_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Materializes the transpose(1, 2) of the conv output into a contiguous buffer.
    ynumel = 256
    xnumel = 32
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 64
    y1 = yindex // 64
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 2048 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 32 * y3), tmp0, xmask & ymask)


def call(args):
    # conv1d via the extern convolution kernel, transpose materialized by the
    # clone kernel, then the dense projection as a single matmul.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (32, 2, 31), (62, 31, 1))
    assert_size_stride(primals_2, (4, 2, 64), (128, 64, 1))
    assert_size_stride(primals_3, (4, 32), (32, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,),
            padding=(15,), dilation=(1,), transposed=False, output_padding=
            (0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 64), (2048, 64, 1))
        buf1 = empty_strided_cuda((4, 64, 32), (2048, 32, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256, 32)](buf0, buf1, 256, 32, XBLOCK
            =32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf0
        buf2 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (256, 32), (32, 1), 0),
            reinterpret_tensor(primals_3, (32, 4), (1, 32), 0), out=buf2)
    return reinterpret_tensor(buf2, (4, 64, 4), (256, 4, 1), 0
        ), primals_1, primals_2, reinterpret_tensor(buf1, (256, 32), (32, 1), 0
        ), primals_3


class Linear(nn.Module):

    def __init__(self, in_features, out_features, bias=True, init_gain='linear'
        ):
        super(Linear, self).__init__()
        self.linear_layer = nn.Linear(in_features, out_features, bias=bias)
        self._init_w(init_gain)

    def _init_w(self, init_gain):
        nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
            calculate_gain(init_gain))

    def forward(self, x):
        return self.linear_layer(x)


class LocationLayerNew(nn.Module):

    def __init__(self, attention_dim, attention_n_filters=32,
        attention_kernel_size=31):
        super(LocationLayerNew, self).__init__()
        self.location_conv = nn.Conv1d(in_channels=2, out_channels=
            attention_n_filters, kernel_size=attention_kernel_size, stride=
            1, padding=(attention_kernel_size - 1) // 2, bias=False)
        self.location_dense = Linear(attention_n_filters, attention_dim,
            bias=False, init_gain='tanh')

    def forward(self, input_0):
        primals_1 = self.location_conv.weight
        primals_3 = self.location_dense.linear_layer.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
aidiary/tacotron-pytorch
LocationLayer
false
3,109
[ "MIT" ]
0
8ea9b1bb61bf753a64ff611b441326ea8c001d20
https://github.com/aidiary/tacotron-pytorch/tree/8ea9b1bb61bf753a64ff611b441326ea8c001d20