Each record pairs a PyTorch module with the Triton code that TorchInductor generates for it. Fields (with the value ranges reported for the dataset):

entry_point: string, 1 to 65 chars
original_triton_python_code: string, 208 to 619k chars
optimised_triton_code: string, 1.15k to 275k chars
repo_name: string, 7 to 115 chars
module_name: string, 1 to 65 chars
synthetic: bool, 1 class
uuid: int64, 0 to 18.5k
licenses: list, 1 to 6 entries
stars: int64, 0 to 19.8k
sha: string, 40 chars (git commit hash)
repo_link: string, 72 to 180 chars
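
Before the samples, a usage sketch (mine, not part of the dataset): it takes one record, exec's both code fields, and checks that the Inductor-generated module matches the eager module numerically. It assumes records are available as Python dicts with the fields above, that the optimised module follows the `<entry_point>New` naming convention seen in the samples, and that a CUDA device is present.

import torch


def check_record(record):
    # Build separate namespaces for the eager and the Inductor-generated code.
    eager_ns, opt_ns = {}, {}
    exec(record['original_triton_python_code'], eager_ns)
    exec(record['optimised_triton_code'], opt_ns)

    entry = record['entry_point']
    init_args, init_kwargs = eager_ns['get_init_inputs']()
    inputs = [t.cuda() for t in eager_ns['get_inputs']()]

    eager = eager_ns[entry](*init_args, **init_kwargs).cuda()
    optimised = opt_ns[entry + 'New'](*init_args, **init_kwargs).cuda()

    # The generated code launches raw Triton kernels, so outputs should agree
    # with eager mode up to floating-point tolerance.
    torch.testing.assert_close(eager(*inputs), optimised(*inputs),
                               rtol=1e-4, atol=1e-4)

Sample records follow.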
entry_point: SimpleAvgPool1dModule

original_triton_python_code:

import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleAvgPool1dModule(torch.nn.Module):

    def __init__(self, kernel_size, stride=None, padding=0):
        super(SimpleAvgPool1dModule, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride

    def forward(self, inputs):
        return F.avg_pool1d(inputs, self.kernel_size, padding=self.padding,
            stride=self.stride)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 1), (1, 1), 0),


class SimpleAvgPool1dModuleNew(torch.nn.Module):

    def __init__(self, kernel_size, stride=None, padding=0):
        super(SimpleAvgPool1dModuleNew, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: briancoutinho/glow
module_name: SimpleAvgPool1dModule
synthetic: false
uuid: 12561
licenses: ["Apache-2.0"]
stars: 0
sha: 4c919d60b3c33296c4109aec8020a1733c98f5b5
repo_link: https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
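
A quick eager-mode sanity sketch (mine, not from the repo): with kernel_size equal to the full length-4 axis, avg_pool1d reduces each row to its mean, which is exactly what the fused kernel above computes with four loads and a multiply by 0.25.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4)
pooled = F.avg_pool1d(x, kernel_size=4)  # shape (4, 1)
torch.testing.assert_close(pooled, x.mean(dim=1, keepdim=True))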

entry_point: SimpleAddMmModule

original_triton_python_code:

import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleAddMmModule(torch.nn.Module):

    def __init__(self, alpha=1, beta=1):
        super(SimpleAddMmModule, self).__init__()
        self.alpha = alpha
        self.beta = beta

    def forward(self, a, b, c):
        return (a + a).addmm(b, c)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    assert_size_stride(arg2_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(buf0, arg1_1, arg2_1, alpha=1, beta=1, out=buf1)
        del arg1_1
        del arg2_1
        del buf0
    return buf1,


class SimpleAddMmModuleNew(torch.nn.Module):

    def __init__(self, alpha=1, beta=1):
        super(SimpleAddMmModuleNew, self).__init__()
        self.alpha = alpha
        self.beta = beta

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]

repo_name: briancoutinho/glow
module_name: SimpleAddMmModule
synthetic: false
uuid: 12562
licenses: ["Apache-2.0"]
stars: 0
sha: 4c919d60b3c33296c4109aec8020a1733c98f5b5
repo_link: https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
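
Worth noting in the record above: Inductor keeps only the pointwise (a + a) in a Triton kernel and hands the matrix multiply to a library GEMM via extern_kernels.addmm (typically cuBLAS on CUDA). A small eager sketch of the same decomposition:

import torch

a, b, c = (torch.rand(4, 4) for _ in range(3))
fused = (a + a).addmm(b, c)  # what the module computes
manual = torch.addmm(a + a, b, c, beta=1, alpha=1)
torch.testing.assert_close(fused, manual)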

entry_point: Qux

original_triton_python_code:

import torch
import torch.jit
import torch.onnx
import torch.nn


class Qux(torch.nn.Module):

    def __init__(self, x):
        super(Qux, self).__init__()
        self.x = x

    def forward(self, a, b):
        return a - b - self.x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'x': 4}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = 4.0
    tmp4 = tmp2 - tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class QuxNew(torch.nn.Module):

    def __init__(self, x):
        super(QuxNew, self).__init__()
        self.x = x

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]

repo_name: briancoutinho/glow
module_name: Qux
synthetic: false
uuid: 12563
licenses: ["Apache-2.0"]
stars: 0
sha: 4c919d60b3c33296c4109aec8020a1733c98f5b5
repo_link: https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
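
Pairs like the one above can be regenerated from the eager module alone; a sketch, assuming PyTorch 2.x with a CUDA device (run with TORCH_LOGS="output_code" to have Inductor print the generated Triton source):

import torch


class Qux(torch.nn.Module):

    def __init__(self, x):
        super().__init__()
        self.x = x

    def forward(self, a, b):
        return a - b - self.x


# torch.compile routes the module through Dynamo + Inductor, which emits
# a fused subtract kernel much like triton_poi_fused_sub_0 above.
compiled = torch.compile(Qux(4))
out = compiled(torch.rand(4, 4, 4, 4, device='cuda'),
               torch.rand(4, 4, 4, 4, device='cuda'))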

entry_point: SimpleCosModule

original_triton_python_code:

import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleCosModule(torch.nn.Module):

    def __init__(self):
        super(SimpleCosModule, self).__init__()

    def forward(self, a):
        return torch.cos(a + a)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_cos_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = tl_math.cos(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_cos_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleCosModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleCosModuleNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: briancoutinho/glow
module_name: SimpleCosModule
synthetic: false
uuid: 12564
licenses: ["Apache-2.0"]
stars: 0
sha: 4c919d60b3c33296c4109aec8020a1733c98f5b5
repo_link: https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5

entry_point: SimpleClampMinModel

original_triton_python_code:

import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleClampMinModel(torch.nn.Module):

    def __init__(self, min):
        super(SimpleClampMinModel, self).__init__()
        self.min = min

    def forward(self, input):
        return torch.clamp_min(input, self.min)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'min': 4}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_min_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_min_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleClampMinModelNew(torch.nn.Module):

    def __init__(self, min):
        super(SimpleClampMinModelNew, self).__init__()
        self.min = min

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: briancoutinho/glow
module_name: SimpleClampMinModel
synthetic: false
uuid: 12565
licenses: ["Apache-2.0"]
stars: 0
sha: 4c919d60b3c33296c4109aec8020a1733c98f5b5
repo_link: https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
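
The generated kernel above uses triton_helpers.maximum because clamp_min is just a pointwise maximum against the scalar bound; a one-line eager check (my sketch):

import torch

x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(torch.clamp_min(x, 0.5),
                           torch.maximum(x, torch.tensor(0.5)))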

entry_point: SimpleExpModule

original_triton_python_code:

import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleExpModule(torch.nn.Module):

    def forward(self, input):
        other = torch.exp(input)
        return torch.exp(other)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = tl_math.exp(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleExpModuleNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: briancoutinho/glow
module_name: SimpleExpModule
synthetic: false
uuid: 12566
licenses: ["Apache-2.0"]
stars: 0
sha: 4c919d60b3c33296c4109aec8020a1733c98f5b5
repo_link: https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
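
All of the small kernels above share one pointwise skeleton: each program instance covers XBLOCK consecutive elements, and xmask guards the final partial block. A minimal standalone version of that pattern (my sketch; needs triton and a CUDA device):

import torch
import triton
import triton.language as tl


@triton.jit
def double_kernel(in_ptr, out_ptr, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK      # start of this program's block
    xindex = xoffset + tl.arange(0, XBLOCK)  # element indices it handles
    xmask = xindex < xnumel                  # guard the partial tail block
    x = tl.load(in_ptr + xindex, mask=xmask)
    tl.store(out_ptr + xindex, x + x, mask=xmask)


x = torch.rand(1000, device='cuda')
out = torch.empty_like(x)
double_kernel[(triton.cdiv(1000, 256),)](x, out, 1000, XBLOCK=256)
torch.testing.assert_close(out, x + x)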

entry_point: UNet

original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class down(nn.Module):
    """
    A class for creating neural network blocks containing layers:

    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU

    This is used in the UNet Class to create a UNet like NN architecture.

    ...

    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels, filterSize):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer.
            This is also used as input and output channels for the second
            convolutional layer.
        filterSize : int
            filter size for the convolution filter. input N would create
            a N x N filter.
        """
        super(down, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))

    def forward(self, x):
        """
        Returns output tensor after passing input `x` to the neural network
        block.

        Parameters
        ----------
        x : tensor
            input to the NN block.

        Returns
        -------
        tensor
            output of the NN block.
        """
        x = F.avg_pool2d(x, 2)
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
        return x


class up(nn.Module):
    """
    A class for creating neural network blocks containing layers:

    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution +
    Leaky ReLU

    This is used in the UNet Class to create a UNet like NN architecture.

    ...

    Methods
    -------
    forward(x, skpCn)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer.
            This is also used for setting input and output channels for
            the second convolutional layer.
        """
        super(up, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1,
            padding=1)
        self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
            padding=1)

    def forward(self, x, skpCn):
        """
        Returns output tensor after passing input `x` to the neural network
        block.

        Parameters
        ----------
        x : tensor
            input to the NN block.
        skpCn : tensor
            skip connection input to the NN block.

        Returns
        -------
        tensor
            output of the NN block.
        """
        x = F.interpolate(x, scale_factor=2, mode='bilinear',
            align_corners=True)
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
            negative_slope=0.1)
        return x


class UNet(nn.Module):
    """
    A class for creating UNet like architecture as specified by the
    Super SloMo paper.

    ...

    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the UNet.
        outChannels : int
            number of output channels for the UNet.
        """
        super(UNet, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
        self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
        self.down1 = down(32, 64, 5)
        self.down2 = down(64, 128, 3)
        self.down3 = down(128, 256, 3)
        self.down4 = down(256, 512, 3)
        self.down5 = down(512, 512, 3)
        self.up1 = up(512, 512)
        self.up2 = up(512, 256)
        self.up3 = up(256, 128)
        self.up4 = up(128, 64)
        self.up5 = up(64, 32)
        self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)

    def forward(self, x):
        """
        Returns output tensor after passing input `x` to the neural network.

        Parameters
        ----------
        x : tensor
            input to the UNet.

        Returns
        -------
        tensor
            output of the UNet.
        """
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)
        s2 = self.down1(s1)
        s3 = self.down2(s2)
        s4 = self.down3(s3)
        s5 = self.down4(s4)
        x = self.down5(s5)
        x = self.up1(x, s5)
        x = self.up2(x, s4)
        x = self.up3(x, s3)
        x = self.up4(x, s2)
        x = self.up5(x, s1)
        x = F.leaky_relu(self.conv3(x), negative_slope=0.1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {'inChannels': 4, 'outChannels': 4}]

optimised_triton_code (truncated in the source; it ends mid-statement below):

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 32
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr1 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr1 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 128
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr1 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 256
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr1 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None,
        eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 512
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr1 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None,
        eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4 % 512
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr1 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4 % 512
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.3333333333333333
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.3333333333333333
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = triton_helpers.minimum(tmp8, tmp7)
    tl.store(out_ptr0 + x0, tmp9, xmask)


@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_14(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.3333333333333333
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 - tmp7
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15(
        in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
        in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x6 = xindex // 16
    x2 = xindex // 16 % 512
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 2, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + 2 * tmp4 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + 2 * tmp19 + 4 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + 2 * tmp19 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + 2 * tmp19 + 4 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + 2 * tmp19 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + 2 * tmp4 + 4 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + 2 * tmp4 + 4 * x6), None,
        eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + x4, tmp49, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 512
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 16 % 1024
    x0 = xindex % 16
    x2 = xindex // 16384
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 512, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 8192 * x2), tmp4,
        other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tl.full([1], 1024, tl.int64)
    tmp17 = tl.load(in_ptr3 + (x0 + 16 * (-512 + x1) + 8192 * x2), tmp14,
        other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + x3, tmp18, None)


@triton.jit
def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.42857142857142855
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.42857142857142855
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 3, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)


@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_20(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.42857142857142855
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 - tmp7
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21(
        in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
        in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x6 = xindex // 64
    x2 = xindex // 64 % 512
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + 4 * tmp19 + 16 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + 4 * tmp19 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + 4 * tmp19 + 16 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + 4 * tmp19 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + 4 * tmp4 + 16 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + 4 * tmp4 + 16 * x6), None,
        eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + x4, tmp49, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 256
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 64 % 512
    x0 = xindex % 64
    x2 = xindex // 32768
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 256, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4,
        other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tl.full([1], 512, tl.int64)
    tmp17 = tl.load(in_ptr3 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp14,
        other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + x3, tmp18, None)


@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4666666666666667
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4666666666666667
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 7, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)


@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_26(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4666666666666667
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 - tmp7
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27(
        in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
        in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 16 % 16
    x0 = xindex % 16
    x6 = xindex // 256
    x2 = xindex // 256 % 256
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 8, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * x6), None,
        eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + 8 * tmp19 + 64 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + 8 * tmp19 + 64 * x6), None,
        eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + 8 * tmp19 + 64 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + 8 * tmp19 + 64 * x6), None,
        eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + 8 * tmp4 + 64 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + 8 * tmp4 + 64 * x6), None,
        eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + x4, tmp49, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 128
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 256 % 256
    x0 = xindex % 256
    x2 = xindex // 65536
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 128, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4,
        other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tl.full([1], 256, tl.int64)
    tmp17 = tl.load(in_ptr3 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp14,
        other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + x3, tmp18, None)


@triton.jit
def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4838709677419355
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4838709677419355
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 15, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)


@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_32(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.4838709677419355
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 - tmp7
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33(
        in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
        in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 32 % 32
    x0 = xindex % 32
    x6 = xindex // 1024
    x2 = xindex // 1024 % 128
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 16, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * x6), None,
        eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + 16 * tmp19 + 256 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + 16 * tmp19 + 256 * x6), None,
        eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + 16 * tmp19 + 256 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + 16 * tmp19 + 256 * x6), None,
        eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + 16 * tmp4 + 256 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + 16 * tmp4 + 256 * x6), None,
        eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + x4, tmp49, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 1024 % 128
    x0 = xindex % 1024
    x2 = xindex // 131072
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4,
        other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tl.full([1], 128, tl.int64)
    tmp17 = tl.load(in_ptr3 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp14,
        other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + x3, tmp18, None)


@triton.jit
def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.49206349206349204
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.49206349206349204
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 31, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)


@triton.jit
def triton_poi_fused__to_copy_arange_clamp_mul_sub_38(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.49206349206349204
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 - tmp7
    tmp9 = triton_helpers.maximum(tmp8, tmp4)
    tmp10 = 1.0
    tmp11 = triton_helpers.minimum(tmp9, tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39(
        in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
        in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 64 % 64
    x0 = xindex % 64
    x6 = xindex // 4096
    x2 = xindex // 4096 % 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 32, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
        eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + 32 * tmp19 + 1024 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + 32 * tmp19 + 1024 * x6), None,
        eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + 32 * tmp19 + 1024 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + 32 * tmp19 + 1024 * x6), None,
        eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + 32 * tmp4 + 1024 * x6), None,
        eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + 32 * tmp4 + 1024 * x6), None,
        eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + x4, tmp49, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 32
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4096 % 64
    x0 = xindex % 4096
    x2 = xindex // 262144
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 32, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4,
        other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tl.full([1], 64, tl.int64)
    tmp17 = tl.load(in_ptr3 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp14,
        other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + x3, tmp18, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 4
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr1 + x3, tmp7, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
     primals_13, primals_14, primals_15, primals_16, primals_17, primals_18,
     primals_19, primals_20, primals_21, primals_22, primals_23, primals_24,
     primals_25, primals_26, primals_27, primals_28, primals_29, primals_30,
     primals_31, primals_32, primals_33, primals_34, primals_35, primals_36,
     primals_37, primals_38, primals_39, primals_40, primals_41, primals_42,
     primals_43, primals_44, primals_45, primals_46, primals_47) = args
    args.clear()
    assert_size_stride(primals_1, (32, 4, 7, 7), (196, 49, 7, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
    assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
    assert_size_stride(primals_9, (64,), (1,))
    assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_11, (128,), (1,))
    assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_13, (128,), (1,))
    assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_15, (256,), (1,))
    assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (256,), (1,))
    assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_19, (512,), (1,))
    assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_21, (512,), (1,))
    assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_23, (512,), (1,))
    assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_25, (512,), (1,))
    assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_27, (512,), (1,))
    assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1))
    assert_size_stride(primals_29, (512,), (1,))
    assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_31, (256,), (1,))
    assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_33, (256,), (1,))
    assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_35, (128,), (1,))
    assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_37, (128,), (1,))
    assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_39, (64,), (1,))
    assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_41, (64,), (1,))
    assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_43, (32,), (1,))
    assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_45, (32,), (1,))
    assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_47, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(3, 3), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
            torch.bool)
        buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0,
            primals_2, buf1, buf2, 524288, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del primals_2
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(3, 3), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
            torch.bool)
        buf5 = buf0
        del buf0
        triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf3,
            primals_5, buf4, buf5, 524288, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
            torch.float32)
        triton_poi_fused_avg_pool2d_1[grid(131072)](buf5, buf6, 131072,
            XBLOCK=512, num_warps=8, num_stages=1)
        buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
            torch.bool)
        buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf7,
            primals_7, buf8, buf9, 262144, XBLOCK=512, num_warps=8,
            num_stages=1)
        del primals_7
        buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
            torch.bool)
        buf12 = buf7
        del buf7
        triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf10,
            primals_9, buf11, buf12, 262144, XBLOCK=512, num_warps=8,
            num_stages=1)
        del primals_9
        buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
            torch.float32)
        triton_poi_fused_avg_pool2d_3[grid(65536)](buf12, buf13, 65536,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1))
        buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
            torch.bool)
        buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf14,
            primals_11, buf15, buf16, 131072, XBLOCK=512, num_warps=8,
            num_stages=1)
        del primals_11
        buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1))
        buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
            torch.bool)
        buf19 = buf14
        del buf14
        triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf17,
            primals_13, buf18, buf19, 131072, XBLOCK=512, num_warps=8,
            num_stages=1)
        del primals_13
        buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1),
            torch.float32)
        triton_poi_fused_avg_pool2d_5[grid(32768)](buf19, buf20, 32768,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1))
        buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1),
            torch.bool)
        buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf21,
            primals_15, buf22, buf23, 65536, XBLOCK=512, num_warps=4,
            num_stages=1)
        del primals_15
        buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
        buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1),
            torch.bool)
        buf26 = buf21
        del buf21
        triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf24,
            primals_17, buf25, buf26, 65536, XBLOCK=512, num_warps=4,
            num_stages=1)
        del primals_17
        buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1),
            torch.float32)
        triton_poi_fused_avg_pool2d_7[grid(16384)](buf26, buf27, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1))
        buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1),
            torch.bool)
        buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf28,
            primals_19, buf29, buf30, 32768, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_19
        buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1))
        buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1),
            torch.bool)
        buf33 = buf28
        del buf28
        triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf31,
            primals_21, buf32, buf33, 32768, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_21
        buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1),
            torch.float32)
        triton_poi_fused_avg_pool2d_9[grid(8192)](buf33, buf34, 8192,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1))
        buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
        buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.
float32) triton_poi_fused_convolution_leaky_relu_10[grid(8192)](buf35, primals_23, buf36, buf37, 8192, XBLOCK=128, num_warps=4, num_stages=1) del buf35 del primals_23 buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1)) buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_11[grid(8192)](buf38, primals_25, buf39, 8192, XBLOCK=128, num_warps=4, num_stages=1) buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_12[grid(4)](buf40, 4, XBLOCK=4, num_warps =1, num_stages=1) buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_13[grid(4)](buf41, 4, XBLOCK=4, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_12[grid(4)](buf42, 4, XBLOCK=4, num_warps =1, num_stages=1) buf43 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_add_clamp_13[grid(4)](buf43, 4, XBLOCK=4, num_warps=1, num_stages=1) buf46 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_14[grid(4)](buf46, 4, XBLOCK=4, num_warps=1, num_stages=1) buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_14[grid(4)](buf48, 4, XBLOCK=4, num_warps=1, num_stages=1) buf45 = buf31 del buf31 buf49 = buf45 del buf45 buf50 = buf49 del buf49 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15[ grid(32768)](buf50, buf41, buf42, buf39, buf38, primals_25, buf40, buf43, buf46, buf48, 32768, XBLOCK=128, num_warps=4, num_stages=1) del buf38 del primals_25 buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1)) buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf51, primals_27, buf52, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0 ) del buf24 triton_poi_fused_cat_17[grid(65536)](buf52, buf51, primals_27, buf33, buf53, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf51 del primals_27 buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1)) buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf54, primals_29, buf55, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_18[grid(8)](buf56, 8, XBLOCK=8, num_warps =1, num_stages=1) buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_19[grid(8)](buf57, 8, XBLOCK=8, num_warps=1, num_stages=1) buf58 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_18[grid(8)](buf58, 8, XBLOCK=8, num_warps =1, num_stages=1) buf59 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused_add_clamp_19[grid(8)](buf59, 8, XBLOCK=8, num_warps=1, num_stages=1) buf62 = empty_strided_cuda((8,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_20[grid(8)](buf62, 8, XBLOCK=8, 
num_warps=1, num_stages=1) buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_20[grid(8)](buf64, 8, XBLOCK=8, num_warps=1, num_stages=1) buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0) del buf17 buf65 = buf61 del buf61 buf66 = buf65 del buf65 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21[ grid(131072)](buf66, buf57, buf58, buf55, buf54, primals_29, buf56, buf59, buf62, buf64, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf54 del primals_29 buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1)) buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf67, primals_31, buf68, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) triton_poi_fused_cat_23[grid(131072)](buf68, buf67, primals_31, buf26, buf69, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf67 del primals_31 buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1)) buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf70, primals_33, buf71, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_24[grid(16)](buf72, 16, XBLOCK=16, num_warps=1, num_stages=1) buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_25[grid(16)](buf73, 16, XBLOCK=16, num_warps=1, num_stages=1) buf74 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_24[grid(16)](buf74, 16, XBLOCK=16, num_warps=1, num_stages=1) buf75 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused_add_clamp_25[grid(16)](buf75, 16, XBLOCK=16, num_warps=1, num_stages=1) buf78 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_26[grid(16)](buf78, 16, XBLOCK=16, num_warps=1, num_stages=1) buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_26[grid(16)](buf80, 16, XBLOCK=16, num_warps=1, num_stages=1) buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16, 1), 0) del buf10 buf81 = buf77 del buf77 buf82 = buf81 del buf81 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27[ grid(262144)](buf82, buf73, buf74, buf71, buf70, primals_33, buf72, buf75, buf78, buf80, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_33 buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1)) buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf83, primals_35, buf84, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) triton_poi_fused_cat_29[grid(262144)](buf84, buf83, primals_35, buf19, buf85, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf83 del 
primals_35 buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1)) buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf86, primals_37, buf87, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_30[grid(32)](buf88, 32, XBLOCK=32, num_warps=1, num_stages=1) buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_31[grid(32)](buf89, 32, XBLOCK=32, num_warps=1, num_stages=1) buf90 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_30[grid(32)](buf90, 32, XBLOCK=32, num_warps=1, num_stages=1) buf91 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_add_clamp_31[grid(32)](buf91, 32, XBLOCK=32, num_warps=1, num_stages=1) buf94 = empty_strided_cuda((32,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_32[grid(32)](buf94, 32, XBLOCK=32, num_warps=1, num_stages=1) buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_32[grid(32)](buf96, 32, XBLOCK=32, num_warps=1, num_stages=1) buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0) del buf3 buf97 = buf93 del buf93 buf98 = buf97 del buf97 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33[ grid(524288)](buf98, buf89, buf90, buf87, buf86, primals_37, buf88, buf91, buf94, buf96, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf86 del primals_37 buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf99, primals_39, buf100, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) triton_poi_fused_cat_35[grid(524288)](buf100, buf99, primals_39, buf12, buf101, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf99 del primals_39 buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf102, primals_41, buf103, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_36[grid(64)](buf104, 64, XBLOCK=64, num_warps=1, num_stages=1) buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_37[grid(64)](buf105, 64, XBLOCK=64, num_warps=1, num_stages=1) buf106 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_36[grid(64)](buf106, 64, XBLOCK=64, num_warps=1, num_stages=1) buf107 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_37[grid(64)](buf107, 64, XBLOCK=64, num_warps=1, num_stages=1) buf110 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_38[grid(64)](buf110, 64, 
            XBLOCK=64, num_warps=1, num_stages=1)
        buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_arange_clamp_mul_sub_38[grid(64)](buf112, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
        buf113 = buf109
        del buf109
        buf114 = buf113
        del buf113
        triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39[grid(1048576)](buf114, buf105, buf106, buf103, buf102, primals_41, buf104, buf107, buf110, buf112, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf102
        del primals_41
        buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
        triton_poi_fused_convolution_leaky_relu_40[grid(524288)](buf115, primals_43, buf116, 524288, XBLOCK=512, num_warps=8, num_stages=1)
        buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
        triton_poi_fused_cat_41[grid(1048576)](buf116, buf115, primals_43, buf5, buf117, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_43
        buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
        buf120 = buf115
        del buf115
        triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf118, primals_45, buf119, buf120, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf118
        del primals_45
        buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1))
        buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool)
        buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64, 1), 0)
        del buf70
        triton_poi_fused_convolution_leaky_relu_42[grid(65536)](buf121, primals_47, buf122, buf123, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        del buf121
        del primals_47
    return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, primals_12, primals_14, primals_16, primals_18,
        primals_20, primals_22, primals_24, primals_26, primals_28,
        primals_30, primals_32, primals_34, primals_36, primals_38,
        primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4,
        buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18,
        buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30,
        buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42,
        buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57,
        buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72,
        buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87,
        buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101,
        buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114,
        buf116, buf117, buf119, buf120, buf122)


class down(nn.Module):
    """
    A class for creating neural network blocks containing layers:

    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU

    This is used in the UNet Class to create a UNet like NN architecture.

    ...

    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels, filterSize):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer.
            This is also used as input and output channels for the second
            convolutional layer.
        filterSize : int
            filter size for the convolution filter. input N would create
            a N x N filter.
        """
        super(down, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))

    def forward(self, x):
        """
        Returns output tensor after passing input `x` to the neural network
        block.

        Parameters
        ----------
        x : tensor
            input to the NN block.

        Returns
        -------
        tensor
            output of the NN block.
        """
        x = F.avg_pool2d(x, 2)
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
        return x


class up(nn.Module):
    """
    A class for creating neural network blocks containing layers:

    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU

    This is used in the UNet Class to create a UNet like NN architecture.

    ...

    Methods
    -------
    forward(x, skpCn)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer.
            This is also used for setting input and output channels for the
            second convolutional layer.
        """
        super(up, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1, padding=1)

    def forward(self, x, skpCn):
        """
        Returns output tensor after passing input `x` to the neural network
        block.

        Parameters
        ----------
        x : tensor
            input to the NN block.
        skpCn : tensor
            skip connection input to the NN block.

        Returns
        -------
        tensor
            output of the NN block.
        """
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)), negative_slope=0.1)
        return x


class UNetNew(nn.Module):
    """
    A class for creating UNet like architecture as specified by the
    Super SloMo paper.

    ...

    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the UNet.
        outChannels : int
            number of output channels for the UNet.
        """
        super(UNetNew, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
        self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
        self.down1 = down(32, 64, 5)
        self.down2 = down(64, 128, 3)
        self.down3 = down(128, 256, 3)
        self.down4 = down(256, 512, 3)
        self.down5 = down(512, 512, 3)
        self.up1 = up(512, 512)
        self.up2 = up(512, 256)
        self.up3 = up(256, 128)
        self.up4 = up(128, 64)
        self.up5 = up(64, 32)
        self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.down1.conv1.weight
        primals_7 = self.down1.conv1.bias
        primals_8 = self.down1.conv2.weight
        primals_9 = self.down1.conv2.bias
        primals_10 = self.down2.conv1.weight
        primals_11 = self.down2.conv1.bias
        primals_12 = self.down2.conv2.weight
        primals_13 = self.down2.conv2.bias
        primals_14 = self.down3.conv1.weight
        primals_15 = self.down3.conv1.bias
        primals_16 = self.down3.conv2.weight
        primals_17 = self.down3.conv2.bias
        primals_18 = self.down4.conv1.weight
        primals_19 = self.down4.conv1.bias
        primals_20 = self.down4.conv2.weight
        primals_21 = self.down4.conv2.bias
        primals_22 = self.down5.conv1.weight
        primals_23 = self.down5.conv1.bias
        primals_24 = self.down5.conv2.weight
        primals_25 = self.down5.conv2.bias
        primals_26 = self.up1.conv1.weight
        primals_27 = self.up1.conv1.bias
        primals_28 = self.up1.conv2.weight
        primals_29 = self.up1.conv2.bias
        primals_30 = self.up2.conv1.weight
        primals_31 = self.up2.conv1.bias
        primals_32 = self.up2.conv2.weight
        primals_33 = self.up2.conv2.bias
        primals_34 = self.up3.conv1.weight
        primals_35 = self.up3.conv1.bias
        primals_36 = self.up3.conv2.weight
        primals_37 = self.up3.conv2.bias
        primals_38 = self.up4.conv1.weight
        primals_39 = self.up4.conv1.bias
        primals_40 = self.up4.conv2.weight
        primals_41 = self.up4.conv2.bias
        primals_42 = self.up5.conv1.weight
        primals_43 = self.up5.conv1.bias
        primals_44 = self.up5.conv2.weight
        primals_45 = self.up5.conv2.bias
        primals_46 = self.conv3.weight
        primals_47 = self.conv3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40, primals_41, primals_42, primals_43, primals_44,
            primals_45, primals_46, primals_47])
        return output[0]
guilindner/Super-SloMo
UNet
false
12,567
[ "MIT" ]
0
251200f907581b31d41ccb1abeb7504e377cf4fb
https://github.com/guilindner/Super-SloMo/tree/251200f907581b31d41ccb1abeb7504e377cf4fb
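A quick way to exercise a record like the one above is to run the eager module (original_triton_python_code field) and the Inductor-generated one (optimised_triton_code field) side by side. The sketch below is illustrative only and not part of the dataset; it assumes a CUDA device and that both UNet and UNetNew from the fields above are in scope. Because UNetNew registers the same submodule names, the eager state_dict loads directly.

import torch

torch.manual_seed(0)
x = torch.rand(4, 4, 64, 64, device='cuda')
eager = UNet(4, 4).cuda()
compiled = UNetNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameter names
with torch.no_grad():
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)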
SimpleCeilModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleCeilModule(torch.nn.Module):

    def forward(self, a, b):
        c = a + b
        return torch.ceil(c)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_ceil_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.ceil(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_ceil_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleCeilModuleNew(torch.nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleCeilModule
false
12,568
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
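For elementwise records such as the one above, a minimal smoke test (illustrative, not part of the dataset; requires a CUDA device and the two classes from the fields above in scope) looks like this. Note that call() asserts the exact contiguous (4, 4, 4, 4) layout produced by get_inputs().

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(SimpleCeilModuleNew()(a, b), SimpleCeilModule()(a, b))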
SimpleAbsModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleAbsModule(torch.nn.Module):

    def __init__(self):
        super(SimpleAbsModule, self).__init__()

    def forward(self, a):
        return torch.abs(a + a)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_abs_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = tl_math.abs(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleAbsModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleAbsModuleNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleAbsModule
false
12,569
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleCumSumModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleCumSumModule(torch.nn.Module):

    def __init__(self, dim):
        super(SimpleCumSumModule, self).__init__()
        self.dim = dim

    def forward(self, tensor):
        return torch.cumsum(tensor, self.dim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
    tmp0 = arg0_0 + arg1_0
    return tmp0


@triton.jit
def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
    tmp1 = tmp0.to(tl.float32)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
    tl.store(out_ptr0 + (r1 + 4 * x0), tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_cumsum_0[grid(256)](arg0_1, buf0, 256, 4, XBLOCK=32, num_warps=2, num_stages=1)
        del arg0_1
    return buf0,


class SimpleCumSumModuleNew(torch.nn.Module):

    def __init__(self, dim):
        super(SimpleCumSumModuleNew, self).__init__()
        self.dim = dim

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleCumSumModule
false
12,570
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
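The tl.associative_scan in the record above computes an inclusive prefix sum along the last (size-4) axis. A reference loop, shown here only for intuition and not part of the dataset, reproduces it in eager PyTorch:

import torch

x = torch.rand(4, 4, 4, 4, 4)
manual = torch.empty_like(x)
running = torch.zeros_like(x[..., 0])
for i in range(x.shape[-1]):  # inclusive scan: out[..., i] = x[..., 0] + ... + x[..., i]
    running = running + x[..., i]
    manual[..., i] = running
torch.testing.assert_close(manual, torch.cumsum(x, dim=4))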
SimpleNotModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleNotModule(torch.nn.Module):

    def __init__(self):
        super(SimpleNotModule, self).__init__()

    def forward(self, a):
        b = torch.logical_not(a)
        return torch.logical_not(b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_logical_not_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 != 0
    tmp2 = tmp1 == 0
    tmp3 = tmp2 == 0
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_logical_not_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleNotModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleNotModuleNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleNotModule
false
12,571
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleFloorModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleFloorModule(torch.nn.Module):

    def forward(self, a, b):
        c = a + b
        return torch.floor(c)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_floor_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.floor(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_floor_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleFloorModuleNew(torch.nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleFloorModule
false
12,572
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
Foo
import torch
import torch.jit
import torch.onnx
import torch.nn


class Foo(torch.nn.Module):

    def __init__(self):
        super(Foo, self).__init__()
        self.conv1 = torch.nn.Conv2d(3, 6, 3)
        self.relu = torch.nn.ReLU()
        self.conv2 = torch.nn.Conv2d(6, 16, 3)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        y = self.conv2(x)
        return y


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 92256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 3844 % 6
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 230400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 3600 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (6, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (16, 6, 3, 3), (54, 9, 3, 1))
    assert_size_stride(primals_5, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 6, 62, 62), (23064, 3844, 62, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(92256)](buf1, primals_2, 92256, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 16, 60, 60), (57600, 3600, 60, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(230400)](buf3, primals_5, 230400, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
    return buf3, primals_1, primals_3, primals_4, buf1


class FooNew(torch.nn.Module):

    def __init__(self):
        super(FooNew, self).__init__()
        self.conv1 = torch.nn.Conv2d(3, 6, 3)
        self.relu = torch.nn.ReLU()
        self.conv2 = torch.nn.Conv2d(6, 16, 3)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
briancoutinho/glow
Foo
false
12,573
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
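In the record above the convolutions themselves stay as extern (cuDNN) calls; the Triton kernels only fuse the bias add with the ReLU, and the bias add alone for the second layer. The eager equivalent of that split, written out purely for reference with the shapes from the asserts in call():

import torch
import torch.nn.functional as F

x = torch.rand(4, 3, 64, 64, device='cuda')
w1, b1 = torch.rand(6, 3, 3, 3, device='cuda'), torch.rand(6, device='cuda')
w2, b2 = torch.rand(16, 6, 3, 3, device='cuda'), torch.rand(16, device='cuda')
h = F.relu(F.conv2d(x, w1) + b1.view(1, -1, 1, 1))  # bias + ReLU -> one pointwise kernel
y = F.conv2d(h, w2) + b2.view(1, -1, 1, 1)          # bias alone  -> one pointwise kernel
torch.testing.assert_close(y, F.conv2d(F.relu(F.conv2d(x, w1, b1)), w2, b2))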
SimpleGeluModule
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleGeluModule(torch.nn.Module):

    def forward(self, tensor):
        return F.gelu(tensor + tensor)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = 0.7071067811865476
    tmp5 = tmp1 * tmp4
    tmp6 = libdevice.erf(tmp5)
    tmp7 = 1.0
    tmp8 = tmp6 + tmp7
    tmp9 = tmp3 * tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_gelu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleGeluModuleNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleGeluModule
false
12,574
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleReciprocalModel
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleReciprocalModel(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleReciprocalModel, self).__init__()
        self.inplace = inplace

    def forward(self, tensor):
        other = tensor + tensor
        return other.reciprocal_() if self.inplace else torch.reciprocal(other)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = tl.full([1], 1, tl.int32)
    tmp3 = tmp2 / tmp1
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleReciprocalModelNew(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleReciprocalModelNew, self).__init__()
        self.inplace = inplace

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleReciprocalModel
false
12,575
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleLogSoftmaxModel
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleLogSoftmaxModel(torch.nn.Module):

    def __init__(self, dimension):
        super(SimpleLogSoftmaxModel, self).__init__()
        self.dimension = dimension

    def forward(self, tensor):
        return F.log_softmax(tensor, self.dimension)


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dimension': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        triton_poi_fused__log_softmax_1[grid(1024)](buf0, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
    return buf1,


class SimpleLogSoftmaxModelNew(torch.nn.Module):

    def __init__(self, dimension):
        super(SimpleLogSoftmaxModelNew, self).__init__()
        self.dimension = dimension

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleLogSoftmaxModel
false
12,576
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
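The two kernels above implement the standard numerically stable log-softmax, log_softmax(x)_i = (x_i - m) - log(sum_j exp(x_j - m)) with m = max_j x_j: the first pass subtracts the row max, the second subtracts the log-sum-exp. A reference re-derivation in eager PyTorch (illustrative only):

import torch

x = torch.rand(4, 4, 4, 4, 4)
m = x.max(dim=4, keepdim=True).values                          # pass 1: row max
shifted = x - m
out = shifted - shifted.exp().sum(dim=4, keepdim=True).log()   # pass 2: log-sum-exp
torch.testing.assert_close(out, torch.log_softmax(x, dim=4))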
SimpleMatmulModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleMatmulModule(torch.nn.Module):

    def __init__(self):
        super(SimpleMatmulModule, self).__init__()

    def forward(self, a, b):
        return a.matmul(b + b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
        buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
        del arg0_1
        del buf0
    return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0),


class SimpleMatmulModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleMatmulModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleMatmulModule
false
12,577
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
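The reinterpret_tensor calls above are zero-copy: a contiguous (4, 4, 4, 4) tensor is viewed as 16 batched (4, 4) matrices so the 4-D matmul can be dispatched as a single bmm. An illustrative check (not part of the dataset):

import torch

a = torch.rand(4, 4, 4, 4)
b = torch.rand(4, 4, 4, 4)
via_bmm = torch.bmm(a.reshape(16, 4, 4), (b + b).reshape(16, 4, 4)).reshape(4, 4, 4, 4)
torch.testing.assert_close(via_bmm, a.matmul(b + b))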
SimpleMaxModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleMaxModule(torch.nn.Module):

    def __init__(self):
        super(SimpleMaxModule, self).__init__()

    def forward(self, a, b):
        return torch.max(a + a, b + b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_maximum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp3 = tmp2 + tmp2
    tmp4 = triton_helpers.maximum(tmp1, tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_maximum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleMaxModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleMaxModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleMaxModule
false
12,578
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleReluModel
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleReluModel(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleReluModel, self).__init__()
        self.inplace = inplace

    def forward(self, tensor):
        other = F.relu(tensor, inplace=self.inplace)
        return F.relu(other, inplace=self.inplace)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleReluModelNew(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleReluModelNew, self).__init__()
        self.inplace = inplace

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleReluModel
false
12,579
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleLinearModule
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleLinearModule(torch.nn.Module):

    def __init__(self):
        super(SimpleLinearModule, self).__init__()

    def forward(self, input, weight, bias=None):
        return F.linear(input + input, weight, bias)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1)
        del arg1_1
        del buf0
    return buf1,


class SimpleLinearModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleLinearModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleLinearModule
false
12,580
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleFmodModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleFmodModule(torch.nn.Module):

    def __init__(self):
        super(SimpleFmodModule, self).__init__()

    def forward(self, a, b):
        if b.size() == torch.Size([]):
            c = a.fmod(b.item())
        else:
            c = a.fmod(b)
        return c.fmod(1.0)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_fmod_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = libdevice.fmod(tmp0, tmp1)
    tmp3 = 1.0
    tmp4 = libdevice.fmod(tmp2, tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_fmod_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleFmodModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleFmodModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleFmodModule
false
12,581
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleMinModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleMinModule(torch.nn.Module):

    def __init__(self):
        super(SimpleMinModule, self).__init__()

    def forward(self, a, b):
        return torch.min(a + a, b + b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_minimum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp3 = tmp2 + tmp2
    tmp4 = triton_helpers.minimum(tmp1, tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_minimum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleMinModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleMinModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleMinModule
false
12,582
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleReshapeModel
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleReshapeModel(torch.nn.Module):

    def __init__(self, shape):
        super(SimpleReshapeModel, self).__init__()
        self.shape = shape

    def forward(self, tensor):
        combined = tensor + tensor
        return combined.reshape(self.shape)


def get_inputs():
    return [torch.rand([4])]


def get_init_inputs():
    return [[], {'shape': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class SimpleReshapeModelNew(torch.nn.Module):

    def __init__(self, shape):
        super(SimpleReshapeModelNew, self).__init__()
        self.shape = shape

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleReshapeModel
false
12,583
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleOrModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleOrModule(torch.nn.Module):

    def __init__(self):
        super(SimpleOrModule, self).__init__()

    def forward(self, a, b):
        c = torch.logical_or(a, b)
        return torch.logical_or(c, c)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_logical_or_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tmp0 != 0
    tmp3 = tmp2 != 0
    tmp4 = tmp1 | tmp3
    tmp5 = tmp4 | tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_logical_or_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleOrModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleOrModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleOrModule
false
12,584
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimplePowModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimplePowModule(torch.nn.Module):

    def __init__(self, power):
        super(SimplePowModule, self).__init__()
        self.power = power

    def forward(self, tensor):
        return torch.pow(tensor, self.power)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'power': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0
    tmp2 = tmp1 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimplePowModuleNew(torch.nn.Module):

    def __init__(self, power):
        super(SimplePowModuleNew, self).__init__()
        self.power = power

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimplePowModule
false
12,585
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleLogModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleLogModule(torch.nn.Module):

    def __init__(self, *dimensions):
        super(SimpleLogModule, self).__init__()

    def forward(self, a):
        b = torch.log(a)
        return torch.log(b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_log_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.log(tmp0)
    tmp2 = tl_math.log(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_log_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleLogModuleNew(torch.nn.Module):

    def __init__(self, *dimensions):
        super(SimpleLogModuleNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleLogModule
false
12,586
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleXorModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleXorModule(torch.nn.Module):

    def __init__(self):
        super(SimpleXorModule, self).__init__()

    def forward(self, a, b):
        c = torch.logical_xor(a, b)
        return torch.logical_xor(c, c)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_logical_xor_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tmp0 != 0
    tmp3 = tmp2 != 0
    tmp4 = tmp1 ^ tmp3
    tmp5 = tmp4 ^ tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_logical_xor_0[grid(256)](arg1_1, arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleXorModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleXorModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleXorModule
false
12,587
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleMulModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleMulModule(torch.nn.Module):

    def __init__(self):
        super(SimpleMulModule, self).__init__()

    def forward(self, left, right):
        other = left.mul(right.item() if right.size() == torch.Size([]) else
            right)
        return other.mul(other)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 * tmp1
    tmp3 = tmp2 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleMulModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleMulModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleMulModule
false
12,588
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleSinModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleSinModule(torch.nn.Module):

    def __init__(self):
        super(SimpleSinModule, self).__init__()

    def forward(self, a):
        return torch.sin(a + a)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = tl_math.sin(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_sin_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleSinModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleSinModuleNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleSinModule
false
12,589
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleStackModel
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleStackModel(torch.nn.Module):

    def __init__(self, dim):
        super(SimpleStackModel, self).__init__()
        self.dim = dim

    def forward(self, a, b):
        c = b + b
        return torch.stack((a, c), dim=self.dim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 2, tl.int64)
    tmp9 = tl.load(in_ptr1 + x1, tmp6 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp10 = tmp9 + tmp9
    tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
    tmp12 = tl.where(tmp6, tmp10, tmp11)
    tmp13 = tl.where(tmp4, tmp5, tmp12)
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_stack_0[grid(512)](arg1_1, arg0_1, buf0, 512,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleStackModelNew(torch.nn.Module):

    def __init__(self, dim):
        super(SimpleStackModelNew, self).__init__()
        self.dim = dim

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
briancoutinho/glow
SimpleStackModel
false
12,590
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleSoftmaxModel
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleSoftmaxModel(torch.nn.Module):

    def __init__(self, dimension):
        super(SimpleSoftmaxModel, self).__init__()
        self.dimension = dimension

    def forward(self, tensor):
        return F.softmax(tensor, self.dimension)


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dimension': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=
            256, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused__softmax_1[grid(1024)](buf0, buf1, 1024, XBLOCK=
            128, num_warps=4, num_stages=1)
        del buf0
    return buf1,


class SimpleSoftmaxModelNew(torch.nn.Module):

    def __init__(self, dimension):
        super(SimpleSoftmaxModelNew, self).__init__()
        self.dimension = dimension

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleSoftmaxModel
false
12,591
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
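The two fused kernels in the record above implement softmax over the last dimension in two passes: the first subtracts the per-row maximum and exponentiates, the second normalizes by the row sum. A plain-PyTorch reference sketch (an illustration added here, not part of the record; it assumes dimension=4 on the 5-D sample input, i.e. the last axis):

import torch

def softmax_reference(x):
    # Pass 1 (triton_poi_fused__softmax_0): subtract the row max for
    # numerical stability, then exponentiate.
    e = (x - x.amax(dim=-1, keepdim=True)).exp()
    # Pass 2 (triton_poi_fused__softmax_1): normalize by the row sum.
    return e / e.sum(dim=-1, keepdim=True)

x = torch.rand(4, 4, 4, 4, 4)
assert torch.allclose(softmax_reference(x), torch.softmax(x, dim=-1))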
AffineTransform
import torch
from torch import nn


class AffineTransform(nn.Module):

    def __init__(self, num_features):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1, 1, num_features))
        self.beta = nn.Parameter(torch.zeros(1, 1, num_features))

    def forward(self, x):
        return self.alpha * x + self.beta


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2,
            primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_3
    return buf0, primals_2


class AffineTransformNew(nn.Module):

    def __init__(self, num_features):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1, 1, num_features))
        self.beta = nn.Parameter(torch.zeros(1, 1, num_features))

    def forward(self, input_0):
        primals_1 = self.alpha
        primals_3 = self.beta
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
jaketae/res-mlp
AffineTransform
false
12,592
[ "MIT" ]
0
6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd
https://github.com/jaketae/res-mlp/tree/6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd
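The single fused kernel in this record computes a per-feature affine map, out[..., f] = alpha[0, 0, f] * x[..., f] + beta[0, 0, f], with alpha and beta broadcast over all leading dimensions. A minimal sketch of that broadcast (illustrative only; note that with the record's initial alpha=1, beta=0 the map is the identity):

import torch

alpha = torch.ones(1, 1, 4)   # per-feature scale, as in the record
beta = torch.zeros(1, 1, 4)   # per-feature shift
x = torch.rand(4, 4, 4, 4)
out = alpha * x + beta        # (1, 1, 4) broadcasts against (..., 4)
assert torch.equal(out, x)    # identity at initialization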
SimpleSumModule
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleSumModule(torch.nn.Module):

    def __init__(self, dtype=None):
        super(SimpleSumModule, self).__init__()
        self.dtype = dtype

    def forward(self, a):
        b = a + a
        return torch.sum(b, dtype=self.dtype)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_sum_0(in_ptr0, out_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tmp0 + tmp0
    tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp4, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_sum_0[grid(1)](arg0_1, buf0, 1, 256,
            num_warps=2, num_stages=1)
        del arg0_1
    return buf0,


class SimpleSumModuleNew(torch.nn.Module):

    def __init__(self, dtype=None):
        super(SimpleSumModuleNew, self).__init__()
        self.dtype = dtype

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleSumModule
false
12,593
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleTanhModel
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleTanhModel(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleTanhModel, self).__init__()
        self.inplace = inplace

    def forward(self, tensor):
        tensor = tensor + tensor
        return tensor.tanh_() if self.inplace else tensor.tanh()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = libdevice.tanh(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleTanhModelNew(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleTanhModelNew, self).__init__()
        self.inplace = inplace

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleTanhModel
false
12,594
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
SimpleTypeasModel
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleTypeasModel(torch.nn.Module):

    def __init__(self):
        super(SimpleTypeasModel, self).__init__()

    def forward(self, tensor, other=None):
        other = tensor if other is None else other
        if tensor.dtype != torch.bool:
            tensor = tensor + tensor
        typed = tensor.type_as(other)
        return typed + typed


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = tmp1 + tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleTypeasModelNew(torch.nn.Module):

    def __init__(self):
        super(SimpleTypeasModelNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
briancoutinho/glow
SimpleTypeasModel
false
12,595
[ "Apache-2.0" ]
0
4c919d60b3c33296c4109aec8020a1733c98f5b5
https://github.com/briancoutinho/glow/tree/4c919d60b3c33296c4109aec8020a1733c98f5b5
AddAndNorm
import torch
import torch.nn as nn


class AddAndNorm(nn.Module):

    def __init__(self, d_model, p_drop):
        super(AddAndNorm, self).__init__()
        self.layer_norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(p_drop)

    def forward(self, inputs, x):
        return self.layer_norm(inputs + self.dropout(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'p_drop': 0.5}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp9, xmask)
    tl.store(out_ptr1 + x2, tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_native_layer_norm_0[grid(64)](primals_2,
            primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(256)](primals_2,
            primals_1, buf0, buf1, primals_3, primals_4, buf2, buf3, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del buf1
        del primals_1
        del primals_2
        del primals_3
        del primals_4
    return buf3, buf2


class AddAndNormNew(nn.Module):

    def __init__(self, d_model, p_drop):
        super(AddAndNormNew, self).__init__()
        self.layer_norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(p_drop)

    def forward(self, input_0, input_1):
        primals_3 = self.layer_norm.weight
        primals_4 = self.layer_norm.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
jaehyek/attention-is-all-you-need
AddAndNorm
false
12,596
[ "MIT" ]
0
9b421f7c98414aeb9f397c5195e3a6a9080a4669
https://github.com/jaehyek/attention-is-all-you-need/tree/9b421f7c98414aeb9f397c5195e3a6a9080a4669
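The two kernels in this record fuse the residual add with LayerNorm: kernel 0 computes the per-row mean and biased variance of inputs + x over the last dimension, kernel 1 normalizes and applies the learned scale and shift. A plain-PyTorch reference sketch (an illustration, under the assumption that it matches nn.LayerNorm with eps=1e-05; dropout does not appear in the kernels, presumably because the graph was traced with dropout inactive):

import torch

def add_and_norm_reference(inputs, x, weight, bias, eps=1e-05):
    s = inputs + x                                     # fused residual add
    mean = s.mean(dim=-1, keepdim=True)                # kernel 0
    var = s.var(dim=-1, unbiased=False, keepdim=True)  # kernel 0, biased
    normed = (s - mean) * torch.rsqrt(var + eps)       # kernel 1
    return normed * weight + bias                      # scale and shift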
ResMLPLayer
import torch
from torch import nn
from torch.nn import functional as F


class AffineTransform(nn.Module):

    def __init__(self, num_features):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1, 1, num_features))
        self.beta = nn.Parameter(torch.zeros(1, 1, num_features))

    def forward(self, x):
        return self.alpha * x + self.beta


class CommunicationLayer(nn.Module):

    def __init__(self, num_features, num_patches):
        super().__init__()
        self.aff1 = AffineTransform(num_features)
        self.fc1 = nn.Linear(num_patches, num_patches)
        self.aff2 = AffineTransform(num_features)

    def forward(self, x):
        x = self.aff1(x)
        residual = x
        x = self.fc1(x.transpose(1, 2)).transpose(1, 2)
        x = self.aff2(x)
        out = x + residual
        return out


class FeedForward(nn.Module):

    def __init__(self, num_features, expansion_factor):
        super().__init__()
        num_hidden = num_features * expansion_factor
        self.aff1 = AffineTransform(num_features)
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.aff2 = AffineTransform(num_features)

    def forward(self, x):
        x = self.aff1(x)
        residual = x
        x = self.fc1(x)
        x = F.gelu(x)
        x = self.fc2(x)
        x = self.aff2(x)
        out = x + residual
        return out


class ResMLPLayer(nn.Module):

    def __init__(self, num_features, num_patches, expansion_factor):
        super().__init__()
        self.cl = CommunicationLayer(num_features, num_patches)
        self.ff = FeedForward(num_features, expansion_factor)

    def forward(self, x):
        x = self.cl(x)
        out = self.ff(x)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4, 'num_patches': 4, 'expansion_factor': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tl.store(out_ptr0 + x4, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x4 = xindex
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x4, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr5 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tmp10 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 * tmp3
    tmp6 = tmp4 + tmp5
    tmp9 = tmp7 * tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = tmp6 + tmp11
    tl.store(out_ptr0 + x4, tmp12, xmask)


@triton.jit
def triton_poi_fused_add_gelu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = 0.7071067811865476
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.erf(tmp6)
    tmp8 = 1.0
    tmp9 = tmp7 + tmp8
    tmp10 = tmp4 * tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x4 = xindex
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x4, xmask)
    tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr4 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tmp8 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp7 = tmp5 * tmp6
    tmp9 = tmp7 + tmp8
    tmp10 = tmp4 + tmp9
    tl.store(out_ptr0 + x4, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15) = args
    args.clear()
    assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_7, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_8, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_9, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_10, (16, 4), (4, 1))
    assert_size_stride(primals_11, (16,), (1,))
    assert_size_stride(primals_12, (4, 16), (16, 1))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_15, (1, 1, 4), (4, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256)](primals_1, primals_2,
            primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 4, 16, 1), torch.float32)
        triton_poi_fused_add_mul_1[grid(256)](primals_6, buf1, primals_5,
            primals_7, primals_1, primals_2, primals_3, buf2, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_1
        del primals_3
        del primals_7
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clone_0[grid(256)](primals_8, buf2, primals_9,
            buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
            float32)
        triton_poi_fused_add_gelu_2[grid(1024)](buf4, primals_11, buf5,
            1024, XBLOCK=128, num_warps=4, num_stages=1)
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_13, reinterpret_tensor(buf5, (64, 16),
            (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0
            ), alpha=1, beta=1, out=buf6)
        del primals_13
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_3[grid(256)](primals_14, buf6, primals_15,
            primals_8, buf2, primals_9, buf7, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_15
        del primals_9
    return (buf7, primals_2, primals_5, primals_6, primals_8, primals_11,
        primals_14, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1,
        buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4,
        reinterpret_tensor(buf5, (64, 16), (16, 1), 0), buf6, primals_12,
        primals_10, primals_4)


class AffineTransform(nn.Module):

    def __init__(self, num_features):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1, 1, num_features))
        self.beta = nn.Parameter(torch.zeros(1, 1, num_features))

    def forward(self, x):
        return self.alpha * x + self.beta


class CommunicationLayer(nn.Module):

    def __init__(self, num_features, num_patches):
        super().__init__()
        self.aff1 = AffineTransform(num_features)
        self.fc1 = nn.Linear(num_patches, num_patches)
        self.aff2 = AffineTransform(num_features)

    def forward(self, x):
        x = self.aff1(x)
        residual = x
        x = self.fc1(x.transpose(1, 2)).transpose(1, 2)
        x = self.aff2(x)
        out = x + residual
        return out


class FeedForward(nn.Module):

    def __init__(self, num_features, expansion_factor):
        super().__init__()
        num_hidden = num_features * expansion_factor
        self.aff1 = AffineTransform(num_features)
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.aff2 = AffineTransform(num_features)

    def forward(self, x):
        x = self.aff1(x)
        residual = x
        x = self.fc1(x)
        x = F.gelu(x)
        x = self.fc2(x)
        x = self.aff2(x)
        out = x + residual
        return out


class ResMLPLayerNew(nn.Module):

    def __init__(self, num_features, num_patches, expansion_factor):
        super().__init__()
        self.cl = CommunicationLayer(num_features, num_patches)
        self.ff = FeedForward(num_features, expansion_factor)

    def forward(self, input_0):
        primals_1 = self.cl.aff1.alpha
        primals_3 = self.cl.aff1.beta
        primals_4 = self.cl.fc1.weight
        primals_5 = self.cl.fc1.bias
        primals_6 = self.cl.aff2.alpha
        primals_7 = self.cl.aff2.beta
        primals_8 = self.ff.aff1.alpha
        primals_9 = self.ff.aff1.beta
        primals_10 = self.ff.fc1.weight
        primals_11 = self.ff.fc1.bias
        primals_12 = self.ff.fc2.weight
        primals_13 = self.ff.fc2.bias
        primals_14 = self.ff.aff2.alpha
        primals_15 = self.ff.aff2.beta
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15])
        return output[0]
jaketae/res-mlp
ResMLPLayer
false
12,597
[ "MIT" ]
0
6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd
https://github.com/jaketae/res-mlp/tree/6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd
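Among the kernels in this record, triton_poi_fused_add_gelu_2 folds the fc1 bias add into an exact (erf-based) GELU; the constants 0.5 and 0.7071067811865476 = 1/sqrt(2) come from gelu(y) = 0.5 * y * (1 + erf(y / sqrt(2))). A reference sketch (illustrative only, not part of the record; it assumes F.gelu's default exact erf formulation rather than the tanh approximation):

import torch

def bias_gelu_reference(x, bias):
    y = x + bias                                           # fused bias add
    return 0.5 * y * (1.0 + torch.erf(y * 0.7071067811865476))

x, b = torch.rand(64, 16), torch.rand(16)
assert torch.allclose(bias_gelu_reference(x, b),
                      torch.nn.functional.gelu(x + b))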
TSAFusion
import torch
from torch import nn as nn
from torch.nn import init as init
from torchvision.models import vgg as vgg
import torch.utils.data
from torch.utils import data as data
from torch import autograd as autograd


class TSAFusion(nn.Module):
    """Temporal Spatial Attention (TSA) fusion module.

    Temporal: Calculate the correlation between center frame and
        neighboring frames;
    Spatial: It has 3 pyramid levels, the attention is similar to SFT.
        (SFT: Recovering realistic texture in image super-resolution by deep
        spatial feature transform.)

    Args:
        num_feat (int): Channel number of middle features. Default: 64.
        num_frame (int): Number of frames. Default: 5.
        center_frame_idx (int): The index of center frame. Default: 2.
    """

    def __init__(self, num_feat=64, num_frame=5, center_frame_idx=2):
        super(TSAFusion, self).__init__()
        self.center_frame_idx = center_frame_idx
        self.temporal_attn1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.temporal_attn2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.feat_fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)
        self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)
        self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
        self.spatial_attn1 = nn.Conv2d(num_frame * num_feat, num_feat, 1)
        self.spatial_attn2 = nn.Conv2d(num_feat * 2, num_feat, 1)
        self.spatial_attn3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.spatial_attn4 = nn.Conv2d(num_feat, num_feat, 1)
        self.spatial_attn5 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.spatial_attn_l1 = nn.Conv2d(num_feat, num_feat, 1)
        self.spatial_attn_l2 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
        self.spatial_attn_l3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.spatial_attn_add1 = nn.Conv2d(num_feat, num_feat, 1)
        self.spatial_attn_add2 = nn.Conv2d(num_feat, num_feat, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear',
            align_corners=False)

    def forward(self, aligned_feat):
        """
        Args:
            aligned_feat (Tensor): Aligned features with shape (b, t, c, h, w).

        Returns:
            Tensor: Features after TSA with the shape (b, c, h, w).
        """
        b, t, c, h, w = aligned_feat.size()
        embedding_ref = self.temporal_attn1(aligned_feat[:, self.
            center_frame_idx, :, :, :].clone())
        embedding = self.temporal_attn2(aligned_feat.view(-1, c, h, w))
        embedding = embedding.view(b, t, -1, h, w)
        corr_l = []
        for i in range(t):
            emb_neighbor = embedding[:, i, :, :, :]
            corr = torch.sum(emb_neighbor * embedding_ref, 1)
            corr_l.append(corr.unsqueeze(1))
        corr_prob = torch.sigmoid(torch.cat(corr_l, dim=1))
        corr_prob = corr_prob.unsqueeze(2).expand(b, t, c, h, w)
        corr_prob = corr_prob.contiguous().view(b, -1, h, w)
        aligned_feat = aligned_feat.view(b, -1, h, w) * corr_prob
        feat = self.lrelu(self.feat_fusion(aligned_feat))
        attn = self.lrelu(self.spatial_attn1(aligned_feat))
        attn_max = self.max_pool(attn)
        attn_avg = self.avg_pool(attn)
        attn = self.lrelu(self.spatial_attn2(torch.cat([attn_max,
            attn_avg], dim=1)))
        attn_level = self.lrelu(self.spatial_attn_l1(attn))
        attn_max = self.max_pool(attn_level)
        attn_avg = self.avg_pool(attn_level)
        attn_level = self.lrelu(self.spatial_attn_l2(torch.cat([attn_max,
            attn_avg], dim=1)))
        attn_level = self.lrelu(self.spatial_attn_l3(attn_level))
        attn_level = self.upsample(attn_level)
        attn = self.lrelu(self.spatial_attn3(attn)) + attn_level
        attn = self.lrelu(self.spatial_attn4(attn))
        attn = self.upsample(attn)
        attn = self.spatial_attn5(attn)
        attn_add = self.spatial_attn_add2(self.lrelu(self.
            spatial_attn_add1(attn)))
        attn = torch.sigmoid(attn)
        feat = feat * attn * 2 + attn_add
        return feat


def get_inputs():
    return [torch.rand([4, 5, 64, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn as nn
from torch.nn import init as init
from torchvision.models import vgg as vgg
import torch.utils.data
from torch.utils import data as data
from torch import autograd as autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 1024
    x1 = xindex // 1024
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2048 + x0 + 5120 * x1), None)
    tl.store(out_ptr0 + x2, tmp0, None)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


@triton.jit
def triton_per_fused_cat_mul_sum_3(in_ptr0, in_ptr1, out_ptr5, out_ptr6,
    out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 16
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 5120 * x1), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)
    tmp7 = tl.load(in_ptr0 + (1024 + x0 + 16 * r2 + 5120 * x1), xmask,
        other=0.0)
    tmp13 = tl.load(in_ptr0 + (2048 + x0 + 16 * r2 + 5120 * x1), xmask,
        other=0.0)
    tmp19 = tl.load(in_ptr0 + (3072 + x0 + 16 * r2 + 5120 * x1), xmask,
        other=0.0)
    tmp25 = tl.load(in_ptr0 + (4096 + x0 + 16 * r2 + 5120 * x1), xmask,
        other=0.0)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp8 = tmp7 * tmp1
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.where(xmask, tmp9, 0)
    tmp12 = tl.sum(tmp11, 1)[:, None]
    tmp14 = tmp13 * tmp1
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp20 = tmp19 * tmp1
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
    tmp23 = tl.where(xmask, tmp21, 0)
    tmp24 = tl.sum(tmp23, 1)[:, None]
    tmp26 = tmp25 * tmp1
    tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
    tmp29 = tl.where(xmask, tmp27, 0)
    tmp30 = tl.sum(tmp29, 1)[:, None]
    tl.store(out_ptr5 + (x0 + 80 * x1), tmp6, xmask)
    tl.store(out_ptr6 + (x0 + 80 * x1), tmp12, xmask)
    tl.store(out_ptr7 + (x0 + 80 * x1), tmp18, xmask)
    tl.store(out_ptr8 + (x0 + 80 * x1), tmp24, xmask)
    tl.store(out_ptr9 + (x0 + 80 * x1), tmp30, xmask)


@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 16
    x1 = xindex // 16 % 320
    x2 = xindex // 5120
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * (x1 // 64) + 80 * x2), None)
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x3, tmp3, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6(in_ptr0,
    out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 2 % 2
    x0 = xindex % 2
    x5 = xindex // 2
    x3 = xindex // 256
    x6 = xindex % 256
    x7 = xindex
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x5), tmp10 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp12 = 2 * x0
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x5), tmp16 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x0
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x5), tmp23 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = 2 * x1
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x5), tmp30 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x5), tmp33 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x5), tmp36 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    tmp39 = 1 + 2 * x1
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x5), tmp43 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x5), tmp46 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x5), tmp49 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    tmp52 = tmp17 > tmp11
    tmp53 = tl.full([1], 1, tl.int8)
    tmp54 = tl.full([1], 0, tl.int8)
    tmp55 = tl.where(tmp52, tmp53, tmp54)
    tmp56 = tmp24 > tmp18
    tmp57 = tl.full([1], 2, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp31 > tmp25
    tmp60 = tl.full([1], 3, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp34 > tmp32
    tmp63 = tl.full([1], 4, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp37 > tmp35
    tmp66 = tl.full([1], 5, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp44 > tmp38
    tmp69 = tl.full([1], 6, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp47 > tmp45
    tmp72 = tl.full([1], 7, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp50 > tmp48
    tmp75 = tl.full([1], 8, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tmp77 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x5), tmp10 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp78 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x5), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp79 = tmp78 + tmp77
    tmp80 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x5), tmp23 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp81 = tmp80 + tmp79
    tmp82 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x5), tmp30 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp83 = tmp82 + tmp81
    tmp84 = tl.load(in_ptr0 + (2 * x0 + 8 * x5), tmp33 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp85 = tmp84 + tmp83
    tmp86 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x5), tmp36 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp87 = tmp86 + tmp85
    tmp88 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x5), tmp43 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp89 = tmp88 + tmp87
    tmp90 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x5), tmp46 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp91 = tmp90 + tmp89
    tmp92 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x5), tmp49 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp93 = tmp92 + tmp91
    tmp94 = (1 + -2 * x0 + -2 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) *
        (2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
        x1 < 5)) + -2 * x0 * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
        x1 < 5)) + -2 * x1 * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 *
        x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 +
        2 * x0 < 5)) + (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 <
        5)))
    tmp95 = tmp93 / tmp94
    tl.store(out_ptr0 + (x6 + 512 * x3), tmp51, xmask)
    tl.store(out_ptr1 + x7, tmp76, xmask)
    tl.store(out_ptr2 + (x6 + 512 * x3), tmp95, xmask)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_7(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x3, tmp7, xmask)


@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8(in_ptr0,
    out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    x1 = xindex // 64
    tmp0 = tl.full([1], -1, tl.int64)
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tmp5 & tmp5
    tmp7 = tl.load(in_ptr0 + (-3 + 4 * x2), tmp6 & xmask, eviction_policy=
        'evict_last', other=float('-inf'))
    tmp8 = tmp1 >= tmp1
    tmp9 = tmp1 < tmp3
    tmp10 = tmp8 & tmp9
    tmp11 = tmp5 & tmp10
    tmp12 = tl.load(in_ptr0 + (-2 + 4 * x2), tmp11 & xmask, eviction_policy
        ='evict_last', other=float('-inf'))
    tmp13 = triton_helpers.maximum(tmp12, tmp7)
    tmp14 = tl.full([1], 1, tl.int64)
    tmp15 = tmp14 >= tmp1
    tmp16 = tmp14 < tmp3
    tmp17 = tmp15 & tmp16
    tmp18 = tmp5 & tmp17
    tmp19 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp18 & xmask, eviction_policy
        ='evict_last', other=float('-inf'))
    tmp20 = triton_helpers.maximum(tmp19, tmp13)
    tmp21 = tmp10 & tmp5
    tmp22 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp21 & xmask, eviction_policy
        ='evict_last', other=float('-inf'))
    tmp23 = triton_helpers.maximum(tmp22, tmp20)
    tmp24 = tmp10 & tmp10
    tmp25 = tl.load(in_ptr0 + 4 * x2, tmp24 & xmask, eviction_policy=
        'evict_last', other=float('-inf'))
    tmp26 = triton_helpers.maximum(tmp25, tmp23)
    tmp27 = tmp10 & tmp17
    tmp28 = tl.load(in_ptr0 + (1 + 4 * x2), tmp27 & xmask, eviction_policy=
        'evict_last', other=float('-inf'))
    tmp29 = triton_helpers.maximum(tmp28, tmp26)
    tmp30 = tmp17 & tmp5
    tmp31 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy=
        'evict_last', other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp29)
    tmp33 = tmp17 & tmp10
    tmp34 = tl.load(in_ptr0 + (2 + 4 * x2), tmp33 & xmask, eviction_policy=
        'evict_last', other=float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp17 & tmp17
    tmp37 = tl.load(in_ptr0 + (3 + 4 * x2), tmp36 & xmask, eviction_policy=
        'evict_last', other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    tmp39 = tmp12 > tmp7
    tmp40 = tl.full([1], 1, tl.int8)
    tmp41 = tl.full([1], 0, tl.int8)
    tmp42 = tl.where(tmp39, tmp40, tmp41)
    tmp43 = tmp19 > tmp13
    tmp44 = tl.full([1], 2, tl.int8)
    tmp45 = tl.where(tmp43, tmp44, tmp42)
    tmp46 = tmp22 > tmp20
    tmp47 = tl.full([1], 3, tl.int8)
    tmp48 = tl.where(tmp46, tmp47, tmp45)
    tmp49 = tmp25 > tmp23
    tmp50 = tl.full([1], 4, tl.int8)
    tmp51 = tl.where(tmp49, tmp50, tmp48)
    tmp52 = tmp28 > tmp26
    tmp53 = tl.full([1], 5, tl.int8)
    tmp54 = tl.where(tmp52, tmp53, tmp51)
    tmp55 = tmp31 > tmp29
    tmp56 = tl.full([1], 6, tl.int8)
    tmp57 = tl.where(tmp55, tmp56, tmp54)
    tmp58 = tmp34 > tmp32
    tmp59 = tl.full([1], 7, tl.int8)
    tmp60 = tl.where(tmp58, tmp59, tmp57)
    tmp61 = tmp37 > tmp35
    tmp62 = tl.full([1], 8, tl.int8)
    tmp63 = tl.where(tmp61, tmp62, tmp60)
    tmp64 = tl.load(in_ptr0 + (-3 + 4 * x2), tmp6 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp65 = tl.load(in_ptr0 + (-2 + 4 * x2), tmp11 & xmask, eviction_policy
        ='evict_last', other=0.0)
    tmp66 = tmp65 + tmp64
    tmp67 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp18 & xmask, eviction_policy
        ='evict_last', other=0.0)
    tmp68 = tmp67 + tmp66
    tmp69 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp21 & xmask, eviction_policy
        ='evict_last', other=0.0)
    tmp70 = tmp69 + tmp68
    tmp71 = tl.load(in_ptr0 + 4 * x2, tmp24 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp72 = tmp71 + tmp70
    tmp73 = tl.load(in_ptr0 + (1 + 4 * x2), tmp27 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp74 = tmp73 + tmp72
    tmp75 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp76 = tmp75 + tmp74
    tmp77 = tl.load(in_ptr0 + (2 + 4 * x2), tmp33 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp78 = tmp77 + tmp76
    tmp79 = tl.load(in_ptr0 + (3 + 4 * x2), tmp36 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp80 = tmp79 + tmp78
    tmp81 = tl.full([1], 9, tl.int32)
    tmp82 = tmp80 / tmp81
    tl.store(out_ptr0 + (x0 + 128 * x1), tmp38, xmask)
    tl.store(out_ptr1 + x2, tmp63, xmask)
    tl.store(out_ptr2 + (x0 + 128 * x1), tmp82, xmask)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x2, tmp7, xmask)


@triton.jit
def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = tl.full([1], 0, tl.int64)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tl.store(out_ptr0 + x0, tmp12, xmask)


@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 2
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 - tmp9
    tmp11 = triton_helpers.maximum(tmp10, tmp6)
    tmp12 = 1.0
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tl.store(out_ptr0 + x0, tmp13, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13(
    in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
    in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 2 % 2
    x0 = xindex % 2
    x5 = xindex // 4
    x2 = xindex // 4 % 64
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + x6, xmask)
    tmp26 = tl.load(in_ptr7 + x2, xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 1, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tl.where(tmp7, tmp6, tmp5)
    tmp11 = tmp9 + tmp10
    tmp12 = 0.0
    tmp13 = tmp11 > tmp12
    tmp14 = 0.1
    tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15) tmp18 = tmp17 + tmp1 tmp19 = tmp17 < 0 tl.where(tmp19, tmp18, tmp17) tmp21 = tmp16 - tmp16 tmp23 = tmp21 * tmp22 tmp24 = tmp16 + tmp23 tmp27 = tmp25 + tmp26 tmp28 = tmp27 > tmp12 tmp29 = tmp27 * tmp14 tmp30 = tl.where(tmp28, tmp27, tmp29) tmp32 = tmp31 + tmp1 tmp33 = tmp31 < 0 tl.where(tmp33, tmp32, tmp31) tmp35 = tmp24 - tmp24 tmp37 = tmp35 * tmp36 tmp38 = tmp24 + tmp37 tmp39 = tmp30 + tmp38 tmp40 = tmp30 > tmp12 tl.store(in_out_ptr0 + x6, tmp39, xmask) tl.store(out_ptr0 + x6, tmp40, xmask) @triton.jit def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = triton_helpers.minimum(tmp10, tmp9) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4 % 4 x0 = xindex % 4 x6 = xindex // 16 x2 = xindex // 16 % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 2, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.1 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp18 = tmp17 + tmp1 tmp19 = tmp17 < 0 tmp20 = tl.where(tmp19, tmp18, tmp17) tmp21 = tl.load(in_ptr2 + (tmp20 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp10 
tmp23 = tmp22 > tmp12 tmp24 = tmp22 * tmp14 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tmp25 - tmp16 tmp28 = tmp26 * tmp27 tmp29 = tmp16 + tmp28 tmp31 = tmp30 + tmp1 tmp32 = tmp30 < 0 tmp33 = tl.where(tmp32, tmp31, tmp30) tmp34 = tl.load(in_ptr2 + (tmp8 + 2 * tmp33 + 4 * x6), None, eviction_policy='evict_last') tmp35 = tmp34 + tmp10 tmp36 = tmp35 > tmp12 tmp37 = tmp35 * tmp14 tmp38 = tl.where(tmp36, tmp35, tmp37) tmp39 = tl.load(in_ptr2 + (tmp20 + 2 * tmp33 + 4 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp10 tmp41 = tmp40 > tmp12 tmp42 = tmp40 * tmp14 tmp43 = tl.where(tmp41, tmp40, tmp42) tmp44 = tmp43 - tmp38 tmp45 = tmp44 * tmp27 tmp46 = tmp38 + tmp45 tmp47 = tmp46 - tmp29 tmp49 = tmp47 * tmp48 tmp50 = tmp29 + tmp49 tl.store(in_out_ptr0 + x4, tmp50, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + x3, None) tmp13 = tl.load(in_out_ptr1 + x3, None) tmp14 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp9 = tl.sigmoid(tmp8) tmp10 = tmp7 * tmp9 tmp11 = 2.0 tmp12 = tmp10 * tmp11 tmp15 = tmp13 + tmp14 tmp16 = tmp12 + tmp15 tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(in_out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (4, 5, 64, 4, 4), (5120, 1024, 16, 4, 1)) assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 320, 1, 1), (320, 1, 1, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 320, 1, 1), 
(320, 1, 1, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_21, (64,), (1,)) assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_23, (64,), (1,)) assert_size_stride(primals_24, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_25, (64,), (1,)) assert_size_stride(primals_26, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_27, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4096)](primals_1, buf0, 4096, XBLOCK= 128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(4096)](buf2, primals_3, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (20, 64, 4, 4), (1024, 16, 4, 1), 0), primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (20, 64, 4, 4), (1024, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(20480)](buf4, primals_5, 20480, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf15 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf10 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 0) buf11 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 16) buf12 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 32) buf13 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 48) buf14 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 64) triton_per_fused_cat_mul_sum_3[grid(64)](buf4, buf2, buf10, buf11, buf12, buf13, buf14, 64, 64, XBLOCK=32, num_warps=8, num_stages=1) buf16 = empty_strided_cuda((4, 320, 4, 4), (5120, 16, 4, 1), torch. float32) triton_poi_fused_mul_4[grid(20480)](primals_1, buf15, buf16, 20480, XBLOCK=128, num_warps=4, num_stages=1) buf17 = extern_kernels.convolution(buf16, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 4, 4), (1024, 16, 4, 1)) buf19 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 64, 4, 4), (1024, 16, 4, 1)) buf20 = buf19 del buf19 triton_poi_fused_convolution_leaky_relu_5[grid(4096)](buf20, primals_9, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf24 = empty_strided_cuda((4, 128, 2, 2), (512, 4, 2, 1), torch. 
float32) buf21 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 0) buf22 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.int8) buf23 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 256) triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6[grid(1024)](buf20 , buf21, buf22, buf23, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf25 = extern_kernels.convolution(buf24, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 64, 2, 2), (256, 4, 2, 1)) buf26 = buf25 del buf25 triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf26, primals_11, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf27 = extern_kernels.convolution(buf26, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 64, 2, 2), (256, 4, 2, 1)) buf28 = buf27 del buf27 triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf28, primals_13, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf32 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch. float32) buf29 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 0) buf30 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.int8) buf31 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 64) triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8[grid(256)](buf28, buf29, buf30, buf31, 256, XBLOCK=128, num_warps=4, num_stages=1) buf33 = extern_kernels.convolution(buf32, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 64, 1, 1), (64, 1, 1, 1)) buf34 = buf33 del buf33 triton_poi_fused_convolution_leaky_relu_9[grid(256)](buf34, primals_15, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf35 = extern_kernels.convolution(buf34, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 64, 1, 1), (64, 1, 1, 1)) buf36 = empty_strided_cuda((2, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_10[grid(2)](buf36, 2, XBLOCK=2, num_warps =1, num_stages=1) buf37 = empty_strided_cuda((2, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_11[grid(2)](buf37, 2, XBLOCK=2, num_warps=1, num_stages=1) buf38 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused__to_copy_10[grid(2)](buf38, 2, XBLOCK=2, num_warps =1, num_stages=1) buf39 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused_add_clamp_11[grid(2)](buf39, 2, XBLOCK=2, num_warps=1, num_stages=1) buf40 = empty_strided_cuda((2,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(2)](buf40, 2, XBLOCK=2, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((2, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(2)](buf42, 2, XBLOCK=2, num_warps=1, num_stages=1) buf43 = extern_kernels.convolution(buf26, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 64, 2, 2), (256, 4, 2, 1)) buf41 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.float32 ) buf44 = buf41 del buf41 buf62 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool) triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13[ grid(1024)](buf44, buf36, buf38, buf35, primals_17, 
buf39, buf40, buf43, primals_19, buf37, buf42, buf62, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf43 del primals_19 buf45 = extern_kernels.convolution(buf44, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 64, 2, 2), (256, 4, 2, 1)) buf46 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_14[grid(4)](buf46, 4, XBLOCK=4, num_warps =1, num_stages=1) buf47 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_15[grid(4)](buf47, 4, XBLOCK=4, num_warps=1, num_stages=1) buf48 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_14[grid(4)](buf48, 4, XBLOCK=4, num_warps =1, num_stages=1) buf49 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_add_clamp_15[grid(4)](buf49, 4, XBLOCK=4, num_warps=1, num_stages=1) buf50 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16[grid(4)](buf50, 4, XBLOCK=4, num_warps=1, num_stages=1) buf52 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16[grid(4)](buf52, 4, XBLOCK=4, num_warps=1, num_stages=1) buf53 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. float32) buf54 = buf53 del buf53 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17[ grid(4096)](buf54, buf46, buf48, buf45, primals_21, buf49, buf50, buf47, buf52, 4096, XBLOCK=256, num_warps=4, num_stages=1) buf55 = extern_kernels.convolution(buf54, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf55, (4, 64, 4, 4), (1024, 16, 4, 1)) buf56 = buf55 del buf55 triton_poi_fused_convolution_1[grid(4096)](buf56, primals_23, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_23 buf57 = extern_kernels.convolution(buf56, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf57, (4, 64, 4, 4), (1024, 16, 4, 1)) buf58 = buf57 del buf57 triton_poi_fused_convolution_leaky_relu_5[grid(4096)](buf58, primals_25, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf59 = extern_kernels.convolution(buf58, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 64, 4, 4), (1024, 16, 4, 1)) buf18 = buf17 del buf17 buf60 = buf59 del buf59 triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18[grid(4096)]( buf18, buf60, primals_7, buf56, primals_27, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_27 del primals_7 buf61 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19[grid (1024)](buf45, primals_21, buf61, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf45 del primals_21 buf63 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20[grid (256)](buf35, primals_17, buf63, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf35 del primals_17 return (buf60, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, buf0, buf2, reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 1024), 
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 2048), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 3072), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 4096), buf15, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30, buf32, buf34, buf36, buf37, buf38, buf39, buf40, buf42, buf44, buf46, buf47, buf48, buf49, buf50, buf52, buf54, buf56, buf58, buf61, buf62, buf63) class TSAFusionNew(nn.Module): """Temporal Spatial Attention (TSA) fusion module. Temporal: Calculate the correlation between center frame and neighboring frames; Spatial: It has 3 pyramid levels, the attention is similar to SFT. (SFT: Recovering realistic texture in image super-resolution by deep spatial feature transform.) Args: num_feat (int): Channel number of middle features. Default: 64. num_frame (int): Number of frames. Default: 5. center_frame_idx (int): The index of center frame. Default: 2. """ def __init__(self, num_feat=64, num_frame=5, center_frame_idx=2): super(TSAFusionNew, self).__init__() self.center_frame_idx = center_frame_idx self.temporal_attn1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.temporal_attn2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.feat_fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1) self.max_pool = nn.MaxPool2d(3, stride=2, padding=1) self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1) self.spatial_attn1 = nn.Conv2d(num_frame * num_feat, num_feat, 1) self.spatial_attn2 = nn.Conv2d(num_feat * 2, num_feat, 1) self.spatial_attn3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn4 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn5 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn_l1 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn_l2 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1) self.spatial_attn_l3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) self.spatial_attn_add1 = nn.Conv2d(num_feat, num_feat, 1) self.spatial_attn_add2 = nn.Conv2d(num_feat, num_feat, 1) self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, input_0): primals_2 = self.temporal_attn1.weight primals_3 = self.temporal_attn1.bias primals_4 = self.temporal_attn2.weight primals_5 = self.temporal_attn2.bias primals_6 = self.feat_fusion.weight primals_7 = self.feat_fusion.bias primals_8 = self.spatial_attn1.weight primals_9 = self.spatial_attn1.bias primals_10 = self.spatial_attn2.weight primals_11 = self.spatial_attn2.bias primals_16 = self.spatial_attn3.weight primals_13 = self.spatial_attn3.bias primals_12 = self.spatial_attn4.weight primals_15 = self.spatial_attn4.bias primals_18 = self.spatial_attn5.weight primals_17 = self.spatial_attn5.bias primals_20 = self.spatial_attn_l1.weight primals_19 = self.spatial_attn_l1.bias primals_14 = self.spatial_attn_l2.weight primals_21 = self.spatial_attn_l2.bias primals_22 = self.spatial_attn_l3.weight primals_23 = self.spatial_attn_l3.bias primals_24 = self.spatial_attn_add1.weight primals_25 = self.spatial_attn_add1.bias primals_26 = self.spatial_attn_add2.weight primals_27 = self.spatial_attn_add2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
hyunobae/BasicSR
TSAFusion
false
12,598
[ "Apache-2.0" ]
0
f2c2fc6cf28933658816c808f55c95fa20b16483
https://github.com/hyunobae/BasicSR/tree/f2c2fc6cf28933658816c808f55c95fa20b16483
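Editorial note (not a dataset field): a minimal smoke-test sketch for the TSAFusionNew module above. It assumes CUDA device 0 (the generated call() allocates CUDA buffers) and that TSAFusionNew is in scope; the input shape is the one hard-coded by the assert_size_stride checks, i.e. (batch, num_frame, num_feat, h, w) aligned features.

import torch

model = TSAFusionNew(num_feat=64, num_frame=5, center_frame_idx=2).cuda()
feats = torch.rand(4, 5, 64, 4, 4, device="cuda")  # shape fixed by the compiled kernels
with torch.no_grad():
    fused = model(feats)
print(fused.shape)  # torch.Size([4, 64, 4, 4]): one fused feature map per clip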
CommunicationLayer
import torch from torch import nn class AffineTransform(nn.Module): def __init__(self, num_features): super().__init__() self.alpha = nn.Parameter(torch.ones(1, 1, num_features)) self.beta = nn.Parameter(torch.zeros(1, 1, num_features)) def forward(self, x): return self.alpha * x + self.beta class CommunicationLayer(nn.Module): def __init__(self, num_features, num_patches): super().__init__() self.aff1 = AffineTransform(num_features) self.fc1 = nn.Linear(num_patches, num_patches) self.aff2 = AffineTransform(num_features) def forward(self, x): x = self.aff1(x) residual = x x = self.fc1(x.transpose(1, 2)).transpose(1, 2) x = self.aff2(x) out = x + residual return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4, 'num_patches': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x4, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr5 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp10 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp6 + tmp11 tl.store(out_ptr0 + x4, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_7, (1, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 4, 16, 1), torch.float32) triton_poi_fused_add_mul_1[grid(256)](primals_6, buf1, primals_5, primals_7, primals_1, primals_2, primals_3, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 del primals_3 del primals_7 return buf2, primals_2, primals_5, primals_6, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, primals_4 class AffineTransform(nn.Module): def __init__(self, num_features): super().__init__() self.alpha = nn.Parameter(torch.ones(1, 1, num_features)) self.beta = nn.Parameter(torch.zeros(1, 1, num_features)) def forward(self, x): return self.alpha * x + self.beta class 
CommunicationLayerNew(nn.Module): def __init__(self, num_features, num_patches): super().__init__() self.aff1 = AffineTransform(num_features) self.fc1 = nn.Linear(num_patches, num_patches) self.aff2 = AffineTransform(num_features) def forward(self, input_0): primals_1 = self.aff1.alpha primals_3 = self.aff1.beta primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.aff2.alpha primals_7 = self.aff2.beta primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
jaketae/res-mlp
CommunicationLayer
false
12,599
[ "MIT" ]
0
6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd
https://github.com/jaketae/res-mlp/tree/6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd
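Editorial note (not a dataset field): a hypothetical equivalence check for this record, assuming CUDA device 0 and both classes above in scope. Because CommunicationLayerNew keeps the same submodule names, load_state_dict makes it a drop-in replacement for the eager module; the tolerances are illustrative.

import torch

torch.manual_seed(0)
ref = CommunicationLayer(num_features=4, num_patches=4).cuda()
opt = CommunicationLayerNew(num_features=4, num_patches=4).cuda()
opt.load_state_dict(ref.state_dict())  # submodule names match 1:1
x = torch.rand(4, 4, 4, 4, device="cuda")  # shape/stride asserted inside call()
with torch.no_grad():
    torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-4)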
FeedForward
import torch from torch import nn from torch.nn import functional as F class AffineTransform(nn.Module): def __init__(self, num_features): super().__init__() self.alpha = nn.Parameter(torch.ones(1, 1, num_features)) self.beta = nn.Parameter(torch.zeros(1, 1, num_features)) def forward(self, x): return self.alpha * x + self.beta class FeedForward(nn.Module): def __init__(self, num_features, expansion_factor): super().__init__() num_hidden = num_features * expansion_factor self.aff1 = AffineTransform(num_features) self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.aff2 = AffineTransform(num_features) def forward(self, x): x = self.aff1(x) residual = x x = self.fc1(x) x = F.gelu(x) x = self.fc2(x) x = self.aff2(x) out = x + residual return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4, 'expansion_factor': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_gelu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_9, (1, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. 
float32) triton_poi_fused_gelu_1[grid(1024)](buf1, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_2[grid(256)](primals_8, buf3, primals_9, buf0, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 return buf4, primals_2, primals_8, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 16), (16, 1), 0 ), buf3, primals_6, primals_4 class AffineTransform(nn.Module): def __init__(self, num_features): super().__init__() self.alpha = nn.Parameter(torch.ones(1, 1, num_features)) self.beta = nn.Parameter(torch.zeros(1, 1, num_features)) def forward(self, x): return self.alpha * x + self.beta class FeedForwardNew(nn.Module): def __init__(self, num_features, expansion_factor): super().__init__() num_hidden = num_features * expansion_factor self.aff1 = AffineTransform(num_features) self.fc1 = nn.Linear(num_features, num_hidden) self.fc2 = nn.Linear(num_hidden, num_features) self.aff2 = AffineTransform(num_features) def forward(self, input_0): primals_1 = self.aff1.alpha primals_3 = self.aff1.beta primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_8 = self.aff2.alpha primals_9 = self.aff2.beta primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
jaketae/res-mlp
FeedForward
false
12,600
[ "MIT" ]
0
6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd
https://github.com/jaketae/res-mlp/tree/6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd
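Editorial note (not a dataset field): the same kind of hypothetical equivalence check for this record, under the same assumptions (CUDA device 0, classes in scope, illustrative tolerances). Both paths compute the exact erf-based GELU (F.gelu eagerly, libdevice.erf in the fused kernel), so the outputs should agree to float tolerance.

import torch

ref = FeedForward(num_features=4, expansion_factor=4).cuda()
opt = FeedForwardNew(num_features=4, expansion_factor=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device="cuda")
with torch.no_grad():
    torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-4)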
DilatedResidualLayer
import torch import torch.nn as nn import torch.nn.functional as F class DilatedResidualLayer(nn.Module): def __init__(self, dilation, in_channels, out_channels): super(DilatedResidualLayer, self).__init__() self.conv_dilated = nn.Conv1d(in_channels, out_channels, 3, padding =dilation, dilation=dilation) self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1) self.dropout = nn.Dropout() def forward(self, x, mask): out = F.relu(self.conv_dilated(x)) out = self.conv_1x1(out) out = self.dropout(out) return (x + out) * mask[:, 0:1, :] def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dilation': 1, 'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 * tmp5 tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_add_convolution_mul_1[grid(64)](buf3, primals_3, primals_5, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, reinterpret_tensor( primals_6, (4, 1, 4), (16, 4, 1), 0) class DilatedResidualLayerNew(nn.Module): def __init__(self, dilation, in_channels, out_channels): super(DilatedResidualLayerNew, self).__init__() self.conv_dilated = nn.Conv1d(in_channels, out_channels, 3, padding =dilation, dilation=dilation) self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1) self.dropout = nn.Dropout() def forward(self, input_0, input_1): primals_1 = self.conv_dilated.weight primals_2 = self.conv_dilated.bias primals_4 = self.conv_1x1.weight primals_5 = self.conv_1x1.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
jeanq1/sign-segmentation
DilatedResidualLayer
false
12,601
[ "MIT" ]
0
cbf1203b06e82e75e06b96a430dab08da3a46f7b
https://github.com/jeanq1/sign-segmentation/tree/cbf1203b06e82e75e06b96a430dab08da3a46f7b
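Editorial note (not a dataset field): a hypothetical check for this record. The compiled graph omits nn.Dropout (it is an inference-time trace), so the eager module must be put in eval() mode for the comparison to be meaningful; CUDA device 0 is assumed and the tolerances are illustrative.

import torch

ref = DilatedResidualLayer(dilation=1, in_channels=4, out_channels=4).cuda().eval()  # eval() disables dropout
opt = DilatedResidualLayerNew(dilation=1, in_channels=4, out_channels=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, device="cuda")
mask = torch.rand(4, 4, 4, device="cuda")  # only mask[:, 0:1, :] is read
with torch.no_grad():
    torch.testing.assert_close(opt(x, mask), ref(x, mask), rtol=1e-4, atol=1e-4)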
MainClassifier
import torch import torch.nn as nn class MainClassifier(nn.Module): def __init__(self, channel, num_classes=100): super(MainClassifier, self).__init__() self.pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(channel, num_classes) def forward(self, x): x = self.pool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (100, 4), (4, 1)) assert_size_stride(primals_3, (100,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 100), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 return buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0) class MainClassifierNew(nn.Module): def __init__(self, channel, num_classes=100): super(MainClassifierNew, self).__init__() self.pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(channel, num_classes) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
janhenriklambrechts/Task-Oriented-Feature-Distillation
MainClassifier
false
12,602
[ "MIT" ]
0
87ab75677b02441bce045e76e96afb078e9df2ea
https://github.com/janhenriklambrechts/Task-Oriented-Feature-Distillation/tree/87ab75677b02441bce045e76e96afb078e9df2ea
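Editorial note (not a dataset field): a hypothetical check for this record under the usual assumptions (CUDA device 0, classes in scope, illustrative tolerances). num_classes must stay at its default of 100, since call() asserts a (100, 4) fc weight.

import torch

ref = MainClassifier(channel=4).cuda()   # num_classes defaults to 100
opt = MainClassifierNew(channel=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device="cuda")
with torch.no_grad():
    torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-4)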
Net
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0) self.fc1 = torch.nn.Linear(3 * 16 * 16, 64) self.fc2 = torch.nn.Linear(64, 10) def forward(self, x): x = self.pool(x) x = x.view(-1, 3 * 16 * 16) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 24, 24])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 24, 24), (2304, 576, 24, 1)) assert_size_stride(primals_2, (64, 768), (768, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (10, 64), (64, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch. float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(2304)](primals_1, buf0, 2304, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((3, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (3, 768), (768, 1), 0), reinterpret_tensor(primals_2, (768, 64), (1, 768), 0), out=buf1) del primals_2 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(192)](buf2, primals_3, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((3, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf3) del primals_5 return buf3, reinterpret_tensor(buf0, (3, 768), (768, 1), 0 ), buf2, primals_4 class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0) self.fc1 = torch.nn.Linear(3 * 16 * 16, 64) self.fc2 = torch.nn.Linear(64, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jcolekaplan/computer_vision
Net
false
12,603
[ "MIT" ]
0
48d39b081a7b6b699019052eeae36ab703bb34eb
https://github.com/jcolekaplan/computer_vision/tree/48d39b081a7b6b699019052eeae36ab703bb34eb
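Editorial note (not a dataset field): a hypothetical check for this record, assuming CUDA device 0 and illustrative tolerances. One shape quirk worth flagging: with the (4, 4, 24, 24) sample input, the 2x2 max-pool gives (4, 4, 12, 12), and view(-1, 3 * 16 * 16) folds those 2304 elements into 3 rows of 768, so both modules return a (3, 10) tensor; the compiled kernels hard-code that batch of 3.

import torch

ref = Net().cuda()
opt = NetNew().cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 24, 24, device="cuda")
with torch.no_grad():
    out_ref = ref(x)
    out_opt = opt(x)
print(out_ref.shape)  # torch.Size([3, 10]), not [4, 10], because of the view(-1, 768) fold
torch.testing.assert_close(out_opt, out_ref, rtol=1e-4, atol=1e-4)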
MultiHeadAttention
import torch import torch.nn as nn class ScaledDotProductAttention(nn.Module): def __init__(self, d_k): super(ScaledDotProductAttention, self).__init__() self.scale = d_k ** -0.5 def forward(self, q, k, v, mask): x = torch.matmul(q, k.transpose(-2, -1)) x = x if mask is None else x.masked_fill(mask, float('-inf')) x = torch.matmul(torch.softmax(self.scale * x, dim=-1), v) return x class MultiHeadAttention(nn.Module): def __init__(self, d_model, d_k, d_v, h): super(MultiHeadAttention, self).__init__() self.h = h self.d_k = d_k self.d_v = d_v self.w_q = nn.Linear(d_model, h * d_k, bias=False) self.w_k = nn.Linear(d_model, h * d_k, bias=False) self.w_v = nn.Linear(d_model, h * d_v, bias=False) self.w_o = nn.Linear(h * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(d_k) def _split_into_heads(self, *xs): return [x.view(x.size(0), x.size(1), self.h, -1).transpose(1, 2) for x in xs] def forward(self, q, k, v, mask=None): q, k, v = self.w_q(q), self.w_k(k), self.w_v(v) q, k, v = self._split_into_heads(q, k, v) x = self.attention(q, k, v, mask) x = x.transpose(1, 2).reshape(x.size(0), x.size(2), -1) x = self.w_o(x) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) 
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf0, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused_clone_0[grid(256)](buf2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf11) return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0 ), primals_7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttention(nn.Module): def __init__(self, d_k): super(ScaledDotProductAttention, self).__init__() self.scale = d_k ** -0.5 def forward(self, q, k, v, mask): x = torch.matmul(q, k.transpose(-2, -1)) x = x if mask is None else x.masked_fill(mask, float('-inf')) x = torch.matmul(torch.softmax(self.scale * x, dim=-1), v) return x class MultiHeadAttentionNew(nn.Module): def __init__(self, d_model, d_k, d_v, h): super(MultiHeadAttentionNew, self).__init__() self.h = h self.d_k = d_k self.d_v = d_v self.w_q = nn.Linear(d_model, h * d_k, bias=False) self.w_k = nn.Linear(d_model, h * d_k, bias=False) self.w_v = nn.Linear(d_model, h * d_v, bias=False) self.w_o = nn.Linear(h * d_v, d_model, bias=False) self.attention = 
ScaledDotProductAttention(d_k) def _split_into_heads(self, *xs): return [x.view(x.size(0), x.size(1), self.h, -1).transpose(1, 2) for x in xs] def forward(self, input_0, input_1, input_2): primals_1 = self.w_q.weight primals_3 = self.w_k.weight primals_5 = self.w_v.weight primals_7 = self.w_o.weight primals_2 = input_0 primals_4 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
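A minimal smoke-test sketch for the compiled module above, assuming a CUDA device is available (the `call` function allocates CUDA buffers directly) and using d_model=4, d_k=d_v=4, h=4 so the weight shapes match the (16, 4) strides asserted in `call`:

import torch

if torch.cuda.is_available():
    mha = MultiHeadAttentionNew(d_model=4, d_k=4, d_v=4, h=4).cuda()
    q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    out = mha(q, k, v)
    # the output keeps the (batch, seq_len, d_model) shape of the queries
    assert out.shape == (4, 4, 4)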
jaehyek/attention-is-all-you-need
MultiHeadAttention
false
12,604
[ "MIT" ]
0
9b421f7c98414aeb9f397c5195e3a6a9080a4669
https://github.com/jaehyek/attention-is-all-you-need/tree/9b421f7c98414aeb9f397c5195e3a6a9080a4669
PositionwiseFeedForward
import torch import torch.nn as nn import torch.nn.functional as F class PositionwiseFeedForward(nn.Module): """Implements FFN equation.""" def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.norm = nn.Sequential() self.w_2 = nn.Linear(d_ff, d_model) self.dropout = None def forward(self, x): return self.w_2(self.norm(F.relu(self.w_1(x)).transpose(2, 1). contiguous()).transpose(2, 1).contiguous()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
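Because `norm` is an identity `nn.Sequential()` and the dropout slot is unused, the forward pass above reduces to w_2(relu(w_1(x))); a quick equivalence sketch (the two transpose(2, 1) calls cancel around the identity norm):

import torch
import torch.nn.functional as F

ffn = PositionwiseFeedForward(d_model=4, d_ff=4)
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(ffn(x), ffn.w_2(F.relu(ffn.w_1(x))), atol=1e-06)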
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clone_relu_threshold_backward_0[grid(256)](buf0, primals_2, buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = buf0 del buf0 extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3 class PositionwiseFeedForwardNew(nn.Module): """Implements FFN equation.""" def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForwardNew, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.norm = nn.Sequential() self.w_2 = nn.Linear(d_ff, d_model) self.dropout = None def forward(self, input_0): primals_1 = self.w_1.weight primals_2 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jetd1/dcp
PositionwiseFeedForward
false
12,605
[ "MIT" ]
0
2fe7256a14bf382f1ea0a9e1df6d52ff21a99a4d
https://github.com/jetd1/dcp/tree/2fe7256a14bf382f1ea0a9e1df6d52ff21a99a4d
Tanh
import math import torch class Tanh(torch.nn.Tanh): """ Class that extends ``torch.nn.Tanh`` additionally computing the log diagonal blocks of the Jacobian. """ def forward(self, inputs, grad: 'torch.Tensor'=None): """ Parameters ---------- inputs : ``torch.Tensor``, required. The input tensor. grad : ``torch.Tensor``, optional (default = None). The log diagonal blocks of the partial Jacobian of previous transformations. Returns ------- The output tensor and the log diagonal blocks of the partial log-Jacobian of previous transformations combined with this transformation. """ g = -2 * (inputs - math.log(2) + torch.nn.functional.softplus(-2 * inputs)) return torch.tanh(inputs), g.view(grad.shape ) + grad if grad is not None else g def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
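The closed form used for g above is log|d tanh(x)/dx| = log(1 - tanh(x)^2) = -2(x - log 2 + softplus(-2x)); a small numerical sketch verifying that identity:

import math

import torch
import torch.nn.functional as F

x = torch.linspace(-3, 3, 7, dtype=torch.float64)
# direct form: derivative of tanh is 1 - tanh(x)^2
log_jac_direct = torch.log(1 - torch.tanh(x) ** 2)
# closed form used by the module's g
log_jac_closed = -2 * (x - math.log(2) + F.softplus(-2 * x))
assert torch.allclose(log_jac_direct, log_jac_closed, atol=1e-08)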
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_softplus_sub_tanh_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 0.6931471805599453 tmp3 = tmp0 - tmp2 tmp4 = -2.0 tmp5 = tmp0 * tmp4 tmp6 = 20.0 tmp7 = tmp5 > tmp6 tmp8 = tl_math.exp(tmp5) tmp9 = libdevice.log1p(tmp8) tmp10 = tl.where(tmp7, tmp5, tmp9) tmp11 = tmp3 + tmp10 tmp12 = tmp11 * tmp4 tl.store(out_ptr0 + x0, tmp1, xmask) tl.store(out_ptr1 + x0, tmp12, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_softplus_sub_tanh_0[grid(256)](arg0_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, buf1 class TanhNew(torch.nn.Tanh): """ Class that extends ``torch.nn.Tanh`` additionally computing the log diagonal blocks of the Jacobian. """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
gndctrl2mjrtm/BNAF
Tanh
false
12,606
[ "MIT" ]
0
a8ecaa2844b5338f9091e58dd571fdc6a598e2f1
https://github.com/gndctrl2mjrtm/BNAF/tree/a8ecaa2844b5338f9091e58dd571fdc6a598e2f1
Model
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, n_input_features): super(Model, self).__init__() self.linear = nn.Linear(n_input_features, 1) def forward(self, x): y_pred = torch.sigmoid(self.linear(x)) return y_pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_input_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1 class ModelNew(nn.Module): def __init__(self, n_input_features): super(ModelNew, self).__init__() self.linear = nn.Linear(n_input_features, 1) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jaykasundra2/pytorchTutorial
Model
false
12,607
[ "MIT" ]
0
954a96797353d463cb96c66596272e180c602134
https://github.com/jaykasundra2/pytorchTutorial/tree/954a96797353d463cb96c66596272e180c602134
NeuralNet1
import torch import torch.nn as nn class NeuralNet1(nn.Module): def __init__(self, input_size, hidden_size): super(NeuralNet1, self).__init__() self.linear1 = nn.Linear(input_size, hidden_size) self.relu = nn.ReLU() self.linear2 = nn.Linear(hidden_size, 1) def forward(self, x): out = self.linear1(x) out = self.relu(out) out = self.linear2(out) y_pred = torch.sigmoid(out) return y_pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 triton_poi_fused_sigmoid_1[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4 class NeuralNet1New(nn.Module): def __init__(self, input_size, hidden_size): super(NeuralNet1New, self).__init__() self.linear1 = nn.Linear(input_size, hidden_size) self.relu = nn.ReLU() self.linear2 = nn.Linear(hidden_size, 1) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jaykasundra2/pytorchTutorial
NeuralNet1
false
12,608
[ "MIT" ]
0
954a96797353d463cb96c66596272e180c602134
https://github.com/jaykasundra2/pytorchTutorial/tree/954a96797353d463cb96c66596272e180c602134
ModMBStddevLayer
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.autograd as autograd


class AllGatherLayer(autograd.Function):
    """All gather layer with backward propagation path.

    Indeed, this module is to make ``dist.all_gather()`` in the backward
    graph. Such kind of operation has been widely used in Moco and other
    contrastive learning algorithms.
    """

    @staticmethod
    def forward(ctx, x):
        """Forward function."""
        ctx.save_for_backward(x)
        output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
        dist.all_gather(output, x)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grad_outputs):
        """Backward function."""
        x, = ctx.saved_tensors
        grad_out = torch.zeros_like(x)
        grad_out = grad_outputs[dist.get_rank()]
        return grad_out


class ModMBStddevLayer(nn.Module):
    """Modified MiniBatch Stddev Layer.

    This layer is modified from ``MiniBatchStddevLayer`` used in PGGAN. In
    StyleGAN2, the authors add a new feature, `channel_groups`, into this
    layer.

    Note that to accelerate the training procedure, we also add a new feature
    of ``sync_std`` to achieve multi-nodes/machine training. This feature is
    still in beta version and we have tested it on 256 scales.

    Args:
        group_size (int, optional): The size of groups in batch dimension.
            Defaults to 4.
        channel_groups (int, optional): The size of groups in channel
            dimension. Defaults to 1.
        sync_std (bool, optional): Whether to use synchronized std feature.
            Defaults to False.
        sync_groups (int | None, optional): The size of groups in node
            dimension. Defaults to None.
        eps (float, optional): Epsilon value to avoid computation error.
            Defaults to 1e-8.
    """

    def __init__(self, group_size=4, channel_groups=1, sync_std=False,
                 sync_groups=None, eps=1e-08):
        super().__init__()
        self.group_size = group_size
        self.eps = eps
        self.channel_groups = channel_groups
        self.sync_std = sync_std
        self.sync_groups = group_size if sync_groups is None else sync_groups
        if self.sync_std:
            assert torch.distributed.is_initialized(), \
                'Only in distributed training can the sync_std be activated.'
            # mmcv was referenced without an import; imported lazily so the
            # default sync_std=False path works without mmcv installed.
            import mmcv
            mmcv.print_log('Adopt synced minibatch stddev layer', 'mmgen')

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input feature map with shape of (N, C, H, W).

        Returns:
            Tensor: Output feature map with shape of (N, C+1, H, W).
        """
        if self.sync_std:
            # get_dist_info was used without an import; it lives in
            # mmcv.runner and is only needed on this synced path.
            from mmcv.runner import get_dist_info
            all_features = torch.cat(AllGatherLayer.apply(x), dim=0)
            rank, ws = get_dist_info()
            local_bs = all_features.shape[0] // ws
            start_idx = local_bs * rank
            if start_idx + self.sync_groups > all_features.shape[0]:
                start_idx = all_features.shape[0] - self.sync_groups
            end_idx = min(local_bs * rank + self.sync_groups,
                          all_features.shape[0])
            x = all_features[start_idx:end_idx]
        assert x.shape[0] <= self.group_size or \
            x.shape[0] % self.group_size == 0, (
                f'Batch size must be smaller than or equal to the group '
                f'size; otherwise, it must be divisible by the group size. '
                f'But got batch size {x.shape[0]}, group size '
                f'{self.group_size}.')
        assert x.shape[1] % self.channel_groups == 0, (
            f'Feature channels must be divisible by "channel_groups". '
            f'channel_groups: {self.channel_groups}, feature channels: '
            f'{x.shape[1]}')
        n, c, h, w = x.shape
        group_size = min(n, self.group_size)
        y = torch.reshape(x, (group_size, -1, self.channel_groups,
                              c // self.channel_groups, h, w))
        y = torch.var(y, dim=0, unbiased=False)
        y = torch.sqrt(y + self.eps)
        y = y.mean(dim=(2, 3, 4), keepdim=True).squeeze(2)
        y = y.repeat(group_size, 1, h, w)
        return torch.cat([x, y], dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
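A shape-only sanity sketch for the eager layer above; the default sync_std=False path needs neither a distributed setup nor mmcv, and one stddev channel is appended, so (N, C, H, W) becomes (N, C + 1, H, W):

import torch

layer = ModMBStddevLayer(group_size=4, channel_groups=1)
x = torch.rand(4, 4, 4, 4)
assert layer(x).shape == (4, 5, 4, 4)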
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.distributed as dist import torch.autograd as autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_repeat_sqrt_var_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp28, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_mean_repeat_sqrt_var_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class AllGatherLayer(autograd.Function): """All gather layer with backward propagation path. Indeed, this module is to make ``dist.all_gather()`` in the backward graph. Such kind of operation has been widely used in Moco and other contrastive learning algorithms. """ @staticmethod def forward(ctx, x): """Forward function.""" ctx.save_for_backward(x) output = [torch.zeros_like(x) for _ in range(dist.get_world_size())] dist.all_gather(output, x) return tuple(output) @staticmethod def backward(ctx, *grad_outputs): """Backward function.""" x, = ctx.saved_tensors grad_out = torch.zeros_like(x) grad_out = grad_outputs[dist.get_rank()] return grad_out class ModMBStddevLayerNew(nn.Module): """Modified MiniBatch Stddev Layer. This layer is modified from ``MiniBatchStddevLayer`` used in PGGAN. In StyleGAN2, the authors add a new feature, `channel_groups`, into this layer. 
Note that to accelerate the training procedure, we also add a new feature
    of ``sync_std`` to achieve multi-nodes/machine training. This feature is
    still in beta version and we have tested it on 256 scales.

    Args:
        group_size (int, optional): The size of groups in batch dimension.
            Defaults to 4.
        channel_groups (int, optional): The size of groups in channel
            dimension. Defaults to 1.
        sync_std (bool, optional): Whether to use synchronized std feature.
            Defaults to False.
        sync_groups (int | None, optional): The size of groups in node
            dimension. Defaults to None.
        eps (float, optional): Epsilon value to avoid computation error.
            Defaults to 1e-8.
    """

    def __init__(self, group_size=4, channel_groups=1, sync_std=False,
                 sync_groups=None, eps=1e-08):
        super().__init__()
        self.group_size = group_size
        self.eps = eps
        self.channel_groups = channel_groups
        self.sync_std = sync_std
        self.sync_groups = group_size if sync_groups is None else sync_groups
        if self.sync_std:
            assert torch.distributed.is_initialized(), \
                'Only in distributed training can the sync_std be activated.'
            # mmcv was referenced without an import; imported lazily so the
            # default sync_std=False path does not require mmcv at all.
            import mmcv
            mmcv.print_log('Adopt synced minibatch stddev layer', 'mmgen')

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
jiangwenj02/mmgeneration
ModMBStddevLayer
false
12,609
[ "Apache-2.0" ]
0
da9ad377ae19260467fc332ddb88f505c38a915a
https://github.com/jiangwenj02/mmgeneration/tree/da9ad377ae19260467fc332ddb88f505c38a915a
MiniBatchStddevLayer
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.autograd as autograd


class AllGatherLayer(autograd.Function):
    """All gather layer with backward propagation path.

    Indeed, this module is to make ``dist.all_gather()`` in the backward
    graph. Such kind of operation has been widely used in Moco and other
    contrastive learning algorithms.
    """

    @staticmethod
    def forward(ctx, x):
        """Forward function."""
        ctx.save_for_backward(x)
        output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
        dist.all_gather(output, x)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grad_outputs):
        """Backward function."""
        x, = ctx.saved_tensors
        grad_out = torch.zeros_like(x)
        grad_out = grad_outputs[dist.get_rank()]
        return grad_out


class MiniBatchStddevLayer(nn.Module):
    """Minibatch standard deviation.

    Args:
        group_size (int, optional): The size of groups in batch dimension.
            Defaults to 4.
        eps (float, optional): Epsilon value to avoid computation error.
            Defaults to 1e-8.
        gather_all_batch (bool, optional): Whether to gather the batch from
            all GPUs. Defaults to False.
    """

    def __init__(self, group_size=4, eps=1e-08, gather_all_batch=False):
        super().__init__()
        self.group_size = group_size
        self.eps = eps
        self.gather_all_batch = gather_all_batch
        if self.gather_all_batch:
            assert torch.distributed.is_initialized(), \
                'Only in distributed training can the tensors be all gathered.'

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        if self.gather_all_batch:
            x = torch.cat(AllGatherLayer.apply(x), dim=0)
        assert x.shape[0] <= self.group_size or \
            x.shape[0] % self.group_size == 0, (
                f'Batch size must be smaller than or equal to the group '
                f'size; otherwise, it must be divisible by the group size. '
                f'But got batch size {x.shape[0]}, group size '
                f'{self.group_size}.')
        n, c, h, w = x.shape
        group_size = min(n, self.group_size)
        y = torch.reshape(x, (group_size, -1, c, h, w))
        y = y - y.mean(dim=0, keepdim=True)
        y = y.pow(2).mean(dim=0, keepdim=False)
        y = torch.sqrt(y + self.eps)
        y = y.mean(dim=(1, 2, 3), keepdim=True)
        y = y.repeat(group_size, 1, h, w)
        return torch.cat([x, y], dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
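An illustrative sketch of the grouping rule in the layer above: a batch of 8 with group_size=4 splits into two groups, and the appended stddev channel gives (8, 5, 4, 4):

import torch

layer = MiniBatchStddevLayer(group_size=4)
x = torch.rand(8, 4, 4, 4)
assert layer(x).shape == (8, 5, 4, 4)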
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.distributed as dist import torch.autograd as autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp28, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class AllGatherLayer(autograd.Function): """All gather layer with backward propagation path. Indeed, this module is to make ``dist.all_gather()`` in the backward graph. Such kind of operation has been widely used in Moco and other contrastive learning algorithms. """ @staticmethod def forward(ctx, x): """Forward function.""" ctx.save_for_backward(x) output = [torch.zeros_like(x) for _ in range(dist.get_world_size())] dist.all_gather(output, x) return tuple(output) @staticmethod def backward(ctx, *grad_outputs): """Backward function.""" x, = ctx.saved_tensors grad_out = torch.zeros_like(x) grad_out = grad_outputs[dist.get_rank()] return grad_out class MiniBatchStddevLayerNew(nn.Module): """Minibatch standard deviation. Args: group_size (int, optional): The size of groups in batch dimension. Defaults to 4. eps (float, optional): Epsilon value to avoid computation error. Defaults to 1e-8. 
gather_all_batch (bool, optional): Whether to gather the batch from
            all GPUs. Defaults to False.
    """

    def __init__(self, group_size=4, eps=1e-08, gather_all_batch=False):
        super().__init__()
        self.group_size = group_size
        self.eps = eps
        self.gather_all_batch = gather_all_batch
        if self.gather_all_batch:
            assert torch.distributed.is_initialized(), \
                'Only in distributed training can the tensors be all gathered.'

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
jiangwenj02/mmgeneration
MiniBatchStddevLayer
false
12,610
[ "Apache-2.0" ]
0
da9ad377ae19260467fc332ddb88f505c38a915a
https://github.com/jiangwenj02/mmgeneration/tree/da9ad377ae19260467fc332ddb88f505c38a915a
PixelNorm
import torch
import torch.nn as nn


def pixel_norm(x, eps=1e-06):
    """Pixel Normalization.

    This normalization is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        x (torch.Tensor): Tensor to be normalized.
        eps (float, optional): Epsilon to avoid dividing zero.
            Defaults to 1e-6.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    # Feature-detect torch.linalg (added in PyTorch 1.7) instead of comparing
    # version strings, which misorders versions such as '1.10.0' < '1.7.0'.
    if hasattr(torch, 'linalg'):
        norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
    else:
        norm = torch.norm(x, p=2, dim=1, keepdim=True)
    # cast to the input dtype so sqrt is not taken of an integer tensor
    norm = norm / torch.sqrt(torch.tensor(x.shape[1], dtype=x.dtype))
    return x / (norm + eps)


class PixelNorm(nn.Module):
    """Pixel Normalization.

    This module is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        eps (float, optional): Epsilon value. Defaults to 1e-6.
    """
    _abbr_ = 'pn'

    def __init__(self, in_channels=None, eps=1e-06):
        super().__init__()
        self.eps = eps

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): Tensor to be normalized.

        Returns:
            torch.Tensor: Normalized tensor.
        """
        return pixel_norm(x, self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
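Since pixel_norm divides each pixel by ||x||_2 / sqrt(C) over the channel dimension, the channel-wise RMS of the output should be close to 1; a quick sketch of that property:

import torch

x = torch.rand(4, 4, 4, 4)
y = pixel_norm(x)
rms = (y ** 2).mean(dim=1).sqrt()
assert torch.allclose(rms, torch.ones_like(rms), atol=1e-03)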
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_sqrt_0(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 0.5
    tmp14 = tmp12 * tmp13
    tmp15 = 1e-06
    tmp16 = tmp14 + tmp15
    tmp17 = tmp0 / tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_linalg_vector_norm_sqrt_0[grid(256)](arg0_1,
            buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


def pixel_norm(x, eps=1e-06):
    """Pixel Normalization.

    This normalization is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        x (torch.Tensor): Tensor to be normalized.
        eps (float, optional): Epsilon to avoid dividing zero.
            Defaults to 1e-6.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    # Feature-detect torch.linalg (added in PyTorch 1.7) instead of comparing
    # version strings, which misorders versions such as '1.10.0' < '1.7.0'.
    if hasattr(torch, 'linalg'):
        norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
    else:
        norm = torch.norm(x, p=2, dim=1, keepdim=True)
    # cast to the input dtype so sqrt is not taken of an integer tensor
    norm = norm / torch.sqrt(torch.tensor(x.shape[1], dtype=x.dtype))
    return x / (norm + eps)


class PixelNormNew(nn.Module):
    """Pixel Normalization.

    This module is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        eps (float, optional): Epsilon value. Defaults to 1e-6.
    """
    _abbr_ = 'pn'

    def __init__(self, in_channels=None, eps=1e-06):
        super().__init__()
        self.eps = eps

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
jiangwenj02/mmgeneration
PixelNorm
false
12,611
[ "Apache-2.0" ]
0
da9ad377ae19260467fc332ddb88f505c38a915a
https://github.com/jiangwenj02/mmgeneration/tree/da9ad377ae19260467fc332ddb88f505c38a915a
AuxiliaryConvolutions
import torch from torch import nn import torch.nn.functional as F from itertools import product as product import torch.optim import torch.utils.data class AuxiliaryConvolutions(nn.Module): """ Additional convolutions to produce higher-level feature maps. """ def __init__(self): super(AuxiliaryConvolutions, self).__init__() self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0) self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. """ for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv7_feats): """ Forward propagation. :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19) :return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2 """ out = F.relu(self.conv8_1(conv7_feats)) out = F.relu(self.conv8_2(out)) conv8_2_feats = out out = F.relu(self.conv9_1(out)) out = F.relu(self.conv9_2(out)) conv9_2_feats = out out = F.relu(self.conv10_1(out)) out = F.relu(self.conv10_2(out)) conv10_2_feats = out out = F.relu(self.conv11_1(out)) conv11_2_feats = F.relu(self.conv11_2(out)) return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats def get_inputs(): return [torch.rand([4, 1024, 64, 64])] def get_init_inputs(): return [[], {}]
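A spatial-size trace sketch for the 64x64 input used by get_inputs above (rather than the 19x19 conv7 map the docstring describes): the two stride-2 convolutions halve the map and the two padding-0 convolutions shrink it by 2 each.

import torch

aux = AuxiliaryConvolutions()
feats = aux(torch.rand(1, 1024, 64, 64))
assert [tuple(f.shape[1:]) for f in feats] == [
    (512, 32, 32), (256, 16, 16), (256, 14, 14), (256, 12, 12)]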
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from itertools import product as product import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None) tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 524288 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 1024 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 524288 * y1), tmp4, xmask) 
@triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 256 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 65536 * y1), tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 196 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 50176 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 196 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 50176 * y1), tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 144 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 
y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 36864 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 144 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 36864 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_4, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (128, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536, 1024), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(4096, 4096)](primals_3, buf0, 4096, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(131072, 9)](primals_4, buf1, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(32768, 9)](primals_8, buf2, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(32768, 9)](primals_12, buf3, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(32768, 9)](primals_16, buf4, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf5 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_3[grid(4194304)](buf6, primals_2, 4194304, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 512, 32, 32), (524288, 1, 16384, 512)) buf8 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32) buf9 = empty_strided_cuda((4, 512, 32, 32), (524288, 1, 16384, 512), torch.float32) triton_poi_fused_convolution_relu_4[grid(2048, 
1024)](buf7, primals_5, buf8, buf9, 2048, 1024, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del buf7 del primals_5 buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128)) del buf9 buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_5[grid(524288)](buf11, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf12 = extern_kernels.convolution(buf11, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf13 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_convolution_relu_6[grid(1024, 256)](buf12, primals_9, buf13, buf14, 1024, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf12 del primals_9 buf15 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 128, 16, 16), (32768, 1, 2048, 128)) del buf14 buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_7[grid(131072)](buf16, primals_11, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf17 = extern_kernels.convolution(buf16, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 14, 14), (50176, 1, 3584, 256)) buf18 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.float32) buf19 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256), torch.float32) triton_poi_fused_convolution_relu_8[grid(1024, 196)](buf17, primals_13, buf18, buf19, 1024, 196, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf17 del primals_13 buf20 = extern_kernels.convolution(buf19, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 14, 14), (25088, 1, 1792, 128)) del buf19 buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_9[grid(100352)](buf21, primals_15, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 256, 12, 12), (36864, 1, 3072, 256)) buf23 = empty_strided_cuda((4, 256, 12, 12), (36864, 144, 12, 1), torch.float32) buf24 = empty_strided_cuda((4, 256, 12, 12), (36864, 1, 3072, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_10[grid(1024, 144) ](buf22, primals_17, buf23, buf24, 1024, 144, XBLOCK=32, YBLOCK =32, num_warps=4, num_stages=1) del buf22 del primals_17 return (buf8, buf13, buf18, buf23, primals_1, buf0, buf1, primals_6, buf2, primals_10, buf3, primals_14, buf4, buf6, buf8, buf11, buf13, buf16, buf18, buf21, buf24) class AuxiliaryConvolutionsNew(nn.Module): """ Additional convolutions to produce higher-level feature maps. 
""" def __init__(self): super(AuxiliaryConvolutionsNew, self).__init__() self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0) self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. """ for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, input_0): primals_1 = self.conv8_1.weight primals_2 = self.conv8_1.bias primals_4 = self.conv8_2.weight primals_5 = self.conv8_2.bias primals_6 = self.conv9_1.weight primals_7 = self.conv9_1.bias primals_8 = self.conv9_2.weight primals_9 = self.conv9_2.bias primals_10 = self.conv10_1.weight primals_11 = self.conv10_1.bias primals_12 = self.conv10_2.weight primals_13 = self.conv10_2.bias primals_14 = self.conv11_1.weight primals_15 = self.conv11_1.bias primals_16 = self.conv11_2.weight primals_17 = self.conv11_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1], output[2], output[3]
gigajet/ICDAR-2019-SROIE
AuxiliaryConvolutions
false
12,612
[ "MIT" ]
0
62dd3ecc90600c0bdf8ceece796fc4e555d3bd16
https://github.com/gigajet/ICDAR-2019-SROIE/tree/62dd3ecc90600c0bdf8ceece796fc4e555d3bd16
CAM_Module
from torch.nn import Module
import torch
from torch.nn import Parameter
from torch.nn import Softmax


class CAM_Module(Module):
    """ Channel attention module"""

    def __init__(self, in_dim):
        super(CAM_Module, self).__init__()
        self.chanel_in = in_dim
        self.gamma = Parameter(torch.zeros(1))
        self.softmax = Softmax(dim=-1)

    def forward(self, x):
        """ Calculate attention between channels

            Args:
                x: input feature maps (B * C * H * W)

            Returns:
                out: attention value + input feature (B * C * H * W)
        """
        m_batchsize, C, height, width = x.size()
        proj_query = x.view(m_batchsize, C, -1)
        proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
        proj_value = x.view(m_batchsize, C, -1)
        energy = torch.bmm(proj_query, proj_key)
        energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy
            ) - energy
        attention = self.softmax(energy_new)
        out = torch.bmm(attention, proj_value)
        out = out.view(m_batchsize, C, height, width)
        mean = torch.mean(out)
        out = out / mean
        out = self.gamma * out + x
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4}]
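Since gamma is initialized to zero, the module above acts as the identity map at initialization; a minimal sketch:

import torch

cam = CAM_Module(in_dim=4)
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(cam(x), x)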
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn import Parameter from torch.nn import Softmax assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + x2, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp10 = tl.load(in_ptr2 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp8 = tmp0 / tmp5 tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tl.debug_barrier() 
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp11, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused_add_div_mean_mul_3[grid(1)](buf6, buf4, primals_2, primals_1, buf7, 1, 256, num_warps=2, num_stages=1) del primals_1 del primals_2 return buf7, buf4, buf6 class CAM_ModuleNew(Module): """ Channel attention module""" def __init__(self, in_dim): super(CAM_ModuleNew, self).__init__() self.chanel_in = in_dim self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self, input_0): primals_2 = self.gamma primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
jiaxu0017/Segmentation_attention_mainfold-Pytorch
CAM_Module
false
12,613
[ "MIT" ]
0
ff42168b5e77618221dc3bc6887765aa14530e8e
https://github.com/jiaxu0017/Segmentation_attention_mainfold-Pytorch/tree/ff42168b5e77618221dc3bc6887765aa14530e8e
EqualLinearActModule
import torch from copy import deepcopy import torch.nn as nn from functools import partial from torch.nn.init import _calculate_correct_fan def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' 
) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModule(nn.Module): """Equalized LR Linear Module with Activation Layer. This module is modified from ``EqualizedLRLinearModule`` defined in PGGAN. The major features updated in this module is adding support for activation layers used in StyleGAN2. Args: equalized_lr_cfg (dict | None, optional): Config for equalized lr. Defaults to dict(gain=1., lr_mul=1.). bias (bool, optional): Whether to use bias item. Defaults to True. bias_init (float, optional): The value for bias initialization. Defaults to ``0.``. act_cfg (dict | None, optional): Config for activation layer. Defaults to None. """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super().__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, ...). Returns: Tensor: Output feature map. """ if x.ndim >= 3: x = x.reshape(x.size(0), -1) x = self.linear(x) if self.with_activation and self.act_type == 'fused_bias': x = self.activate(x, self.bias * self.lr_mul) elif self.bias is not None and self.with_activation: x = self.activate(x + self.bias * self.lr_mul) elif self.bias is not None: x = x + self.bias * self.lr_mul elif self.with_activation: x = self.activate(x) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from copy import deepcopy import torch.nn as nn from functools import partial from torch.nn.init import _calculate_correct_fan assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sqrt_0[grid(16)](primals_2, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 return buf2, buf0, primals_1 def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. 
mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModuleNew(nn.Module): """Equalized LR Linear Module with Activation Layer. This module is modified from ``EqualizedLRLinearModule`` defined in PGGAN. The major features updated in this module is adding support for activation layers used in StyleGAN2. Args: equalized_lr_cfg (dict | None, optional): Config for equalized lr. Defaults to dict(gain=1., lr_mul=1.). bias (bool, optional): Whether to use bias item. Defaults to True. 
bias_init (float, optional): The value for bias initialization. Defaults to ``0.``. act_cfg (dict | None, optional): Config for activation layer. Defaults to None. """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super().__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, input_0): primals_3 = self.bias primals_1 = self.linear.weight_orig primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jiangwenj02/mmgeneration
EqualLinearActModule
false
12,614
[ "Apache-2.0" ]
0
da9ad377ae19260467fc332ddb88f505c38a915a
https://github.com/jiangwenj02/mmgeneration/tree/da9ad377ae19260467fc332ddb88f505c38a915a
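Two notes on EqualLinearActModule as pasted: the act_cfg branches reference build_activation_layer and fused_bias_leakyrelu without importing them (in upstream mmgeneration these come from mmcv.cnn and mmcv.ops), so the snippet only runs with act_cfg=None; and with the default gain=1.0, lr_mul=1.0 and fan_in=4, the equalized-lr rescaling reduces to multiplying weight_orig by sqrt(1/4) = 0.5, the constant hard-coded in triton_poi_fused_mul_sqrt_0. A small CPU sketch of that rescaling (my own illustration):

import math
import torch

# Illustration only: assumes EqualLinearActModule from above is importable.
torch.manual_seed(0)
mod = EqualLinearActModule(4, 4)  # act_cfg=None, so no mmcv dependency
x = torch.rand(4, 4)
y = mod(x)
# Reproduce by hand: gain * sqrt(1 / fan_in) * lr_mul rescales weight_orig,
# and the bias is added (scaled by lr_mul) after the linear projection.
scale = 1.0 * math.sqrt(1.0 / 4) * 1.0  # = 0.5 for this configuration
ref = x @ (mod.linear.weight_orig * scale).t() + mod.bias * mod.lr_mul
print(torch.allclose(y, ref, atol=1e-6))  # expected: True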
AdaptiveInstanceNorm
import torch import torch.nn as nn from torch.nn.init import _calculate_correct_fan def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' 
) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class AdaptiveInstanceNorm(nn.Module): """Adaptive Instance Normalization Module. Ref: https://github.com/rosinality/style-based-gan-pytorch/blob/master/model.py # noqa Args: in_channel (int): The number of input's channel. style_dim (int): Style latent dimension. """ def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.affine = EqualizedLRLinearModule(style_dim, in_channel * 2) self.affine.bias.data[:in_channel] = 1 self.affine.bias.data[in_channel:] = 0 def forward(self, input, style): """Forward function. Args: input (Tensor): Input tensor with shape (n, c, h, w). style (Tensor): Input style tensor with shape (n, c). Returns: Tensor: Forward results. """ style = self.affine(style).unsqueeze(2).unsqueeze(3) gamma, beta = style.chunk(2, 1) out = self.norm(input) out = gamma * out + beta return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.init import _calculate_correct_fan assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_lift_fresh_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.4142135381698608 tmp2 = tmp0 * tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 * tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp24 = tmp22 + tmp23 tmp25 = tmp0 - tmp10 tmp26 = tmp25 * tmp21 tmp27 = tmp24 * tmp26 tmp30 = tmp28 + tmp29 tmp31 = tmp27 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_lift_fresh_mul_sqrt_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4 ), 0), out=buf1) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) triton_per_fused__native_batch_norm_legit_add_mul_1[grid(16)](buf5, primals_4, buf1, primals_2, buf2, buf6, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf1 del primals_2 return buf6, buf0, primals_3, primals_4, buf2, buf5 def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' 
) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class AdaptiveInstanceNormNew(nn.Module): """Adaptive Instance Normalization Module. Ref: https://github.com/rosinality/style-based-gan-pytorch/blob/master/model.py # noqa Args: in_channel (int): The number of input's channel. style_dim (int): Style latent dimension. """ def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.affine = EqualizedLRLinearModule(style_dim, in_channel * 2) self.affine.bias.data[:in_channel] = 1 self.affine.bias.data[in_channel:] = 0 def forward(self, input_0, input_1): primals_2 = self.affine.bias primals_1 = self.affine.weight_orig primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
jiangwenj02/mmgeneration
AdaptiveInstanceNorm
false
12,615
[ "Apache-2.0" ]
0
da9ad377ae19260467fc332ddb88f505c38a915a
https://github.com/jiangwenj02/mmgeneration/tree/da9ad377ae19260467fc332ddb88f505c38a915a
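AdaptiveInstanceNorm implements StyleGAN-style AdaIN: the style vector is mapped through the equalized-lr linear layer to a per-channel gamma and beta, and the input is instance-normalised before the affine is applied. A hand-rolled CPU check (my own illustration, using the nn.InstanceNorm2d defaults of eps=1e-5 and biased variance):

import torch

# Illustration only: assumes AdaptiveInstanceNorm from above is importable.
torch.manual_seed(0)
adain = AdaptiveInstanceNorm(in_channel=4, style_dim=4)
x = torch.rand(4, 4, 4, 4)
style = torch.rand(4, 4)
out = adain(x, style)
# Hand-rolled reference: per-sample, per-channel normalisation over the
# spatial dims, then the style-conditioned scale and shift.
mu = x.mean(dim=(2, 3), keepdim=True)
var = x.var(dim=(2, 3), keepdim=True, unbiased=False)
gamma, beta = adain.affine(style)[:, :, None, None].chunk(2, 1)
ref = gamma * (x - mu) / torch.sqrt(var + 1e-05) + beta
print(torch.allclose(out, ref, atol=1e-5))  # expected: True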
Critic
import torch import torch.nn as nn import torch.nn.functional as F class Critic(nn.Module): def __init__(self, state_dim, action_dim): super(Critic, self).__init__() self.l1 = nn.Linear(state_dim + action_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, 1) self.l4 = nn.Linear(state_dim + action_dim, 400) self.l5 = nn.Linear(400, 300) self.l6 = nn.Linear(300, 1) def forward(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.l1(xu)) x1 = F.relu(self.l2(x1)) x1 = self.l3(x1) x2 = F.relu(self.l4(xu)) x2 = F.relu(self.l5(x2)) x2 = self.l6(x2) return x1, x2 def Q1(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.l1(xu)) x1 = F.relu(self.l2(x1)) x1 = self.l3(x1) return x1 def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400,), (1,)) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300,), (1,)) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1,), (1,)) assert_size_stride(primals_9, (400, 8), (8, 1)) assert_size_stride(primals_10, (400,), (1,)) assert_size_stride(primals_11, (300, 400), (400, 1)) assert_size_stride(primals_12, (300,), (1,)) assert_size_stride(primals_13, (1, 300), (300, 1)) assert_size_stride(primals_14, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(1600)](buf2, primals_4, 1600, XBLOCK= 256, num_warps=4, 
num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), ( 1, 400), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_2[grid(1200)](buf4, primals_6, 1200, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf7) del primals_9 buf8 = buf7 del buf7 triton_poi_fused_relu_1[grid(1600)](buf8, primals_10, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_10 buf9 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_relu_2[grid(1200)](buf10, primals_12, 1200, XBLOCK =256, num_warps=4, num_stages=1) del primals_12 buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_14, buf10, reinterpret_tensor( primals_13, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf12) del primals_14 return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13, primals_11, primals_7, primals_5) class CriticNew(nn.Module): def __init__(self, state_dim, action_dim): super(CriticNew, self).__init__() self.l1 = nn.Linear(state_dim + action_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, 1) self.l4 = nn.Linear(state_dim + action_dim, 400) self.l5 = nn.Linear(400, 300) self.l6 = nn.Linear(300, 1) def Q1(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.l1(xu)) x1 = F.relu(self.l2(x1)) x1 = self.l3(x1) return x1 def forward(self, input_0, input_1): primals_3 = self.l1.weight primals_4 = self.l1.bias primals_5 = self.l2.weight primals_6 = self.l2.bias primals_7 = self.l3.weight primals_8 = self.l3.bias primals_9 = self.l4.weight primals_10 = self.l4.bias primals_11 = self.l5.weight primals_12 = self.l5.bias primals_13 = self.l6.weight primals_14 = self.l6.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0], output[1]
jinPrelude/ksp-ai
Critic
false
12,616
[ "MIT" ]
0
d8b235d1ef77afe413fbff2e859e1210330bde37
https://github.com/jinPrelude/ksp-ai/tree/d8b235d1ef77afe413fbff2e859e1210330bde37
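Critic is the twin-Q network from TD3: forward returns both heads so the Bellman target can take their minimum (clipped double-Q), while Q1 exposes only the first head for the actor update. A minimal usage sketch (my own illustration):

import torch

# Usage sketch only: assumes Critic from above is importable.
torch.manual_seed(0)
critic = Critic(state_dim=4, action_dim=4)
state = torch.rand(4, 4)
action = torch.rand(4, 4)
q1, q2 = critic(state, action)      # two independent heads, (4, 1) each
target_q = torch.min(q1, q2)        # clipped double-Q target, as in TD3
q1_only = critic.Q1(state, action)  # actor updates use the first head only
print(q1.shape, q2.shape, torch.allclose(q1, q1_only))  # (4,1) (4,1) True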
LinearDeepQNetwork
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch as T


class LinearDeepQNetwork(nn.Module):

    def __init__(self, lr, input, n_actions):
        super(LinearDeepQNetwork, self).__init__()
        self.fc1 = nn.Linear(input, 128)
        self.fc2 = nn.Linear(128, n_actions)
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.loss = nn.MSELoss()
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device)

    def forward(self, state):
        layer1 = F.relu(self.fc1(state))
        actions = self.fc2(layer1)
        return actions


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'lr': 4, 'input': 4, 'n_actions': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.optim as optim
import torch as T
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (128, 4), (4, 1))
    assert_size_stride(primals_2, (128,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 128), (128, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
            primals_2, buf3, 8192, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
            (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
            0), alpha=1, beta=1, out=buf2)
        del primals_5
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3


class LinearDeepQNetworkNew(nn.Module):

    def __init__(self, lr, input, n_actions):
        super(LinearDeepQNetworkNew, self).__init__()
        self.fc1 = nn.Linear(input, 128)
        self.fc2 = nn.Linear(128, n_actions)
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.loss = nn.MSELoss()
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
joaomanojr/tecprog
LinearDeepQNetwork
false
12,617
[ "MIT" ]
0
825ae3dd9f2ddd0bce2d410af7deae8eb5ba3d21
https://github.com/joaomanojr/tecprog/tree/825ae3dd9f2ddd0bce2d410af7deae8eb5ba3d21
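LinearDeepQNetwork bundles its own optimizer and MSE loss, so a DQN-style update is self-contained. A training-step sketch (my own illustration; the batch and the target values below are fabricated stand-ins for replay-buffer samples and r + gamma * max_a' Q'(s', a')):

import torch

# Training-step sketch only: assumes LinearDeepQNetwork from above is
# importable; all tensors below are fabricated for illustration.
torch.manual_seed(0)
net = LinearDeepQNetwork(lr=1e-3, input=8, n_actions=4)
net.to(net.device)  # ensure parameters sit on the chosen device
states = torch.rand(32, 8, device=net.device)
actions = torch.randint(0, 4, (32,), device=net.device)
targets = torch.rand(32, device=net.device)  # stand-in Bellman targets
q_pred = net(states).gather(1, actions.unsqueeze(1)).squeeze(1)
loss = net.loss(q_pred, targets)
net.optimizer.zero_grad()
loss.backward()
net.optimizer.step()
print(float(loss))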
GCN
from torch.nn import Module import math import torch import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.modules.module import Module import torch.nn.functional as F class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.matmul(input, self.weight) output = torch.matmul(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCN, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, x, adj): x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc2(x, adj) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import math import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask) tmp6 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask) tmp9 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tl_math.log(tmp22) tl.store(out_ptr0 + x4, tmp11, xmask) tl.store(out_ptr1 + x4, tmp23, xmask) @triton.jit def triton_poi_fused__log_softmax_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x3 = xindex // 64 x5 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x5 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (x5 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + x4, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf2, primals_4, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_5, out=buf3) buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__log_softmax_add_1[grid(64)](buf4, primals_6, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__log_softmax_add_2[grid(256)](buf7, primals_6, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 del primals_6 return buf7, buf7, reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (4, 64), (1, 4), 0), reinterpret_tensor( primals_5, (4, 4), (1, 4), 0), buf8, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.matmul(input, self.weight) output = torch.matmul(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCNNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_5 = self.gc2.weight primals_6 = self.gc2.bias primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
jindl465/pygcn
GCN
false
12,618
[ "MIT" ]
0
bbbedc2278d1b1bc260e138f98cf27733995914d
https://github.com/jindl465/pygcn/tree/bbbedc2278d1b1bc260e138f98cf27733995914d
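GCN stacks two GraphConvolution layers with ReLU and dropout and returns log-probabilities via log_softmax(dim=1); in typical Kipf-and-Welling pipelines adj would be the renormalised D^-1/2 (A + I) D^-1/2, though the layer itself accepts any dense adjacency. A quick check that the output exponentiates to a distribution along dim 1 (my own illustration):

import torch

# Illustration only: assumes GCN from above is importable.
torch.manual_seed(0)
gcn = GCN(nfeat=4, nhid=4, nclass=4, dropout=0.5)
gcn.eval()  # disable dropout so the check is deterministic
x = torch.rand(4, 4, 4, 4)
adj = torch.rand(4, 4, 4, 4)
out = gcn(x, adj)
# log_softmax(dim=1) => exp(out) sums to 1 along dim 1.
print(torch.allclose(out.exp().sum(dim=1), torch.ones(4, 4, 4), atol=1e-5))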
CSNet
import torch class CSNet(torch.nn.Module): def __init__(self): super(CSNet, self).__init__() k_stride = 20 color_channel = 3 mr = 12 self.conv0 = torch.nn.Conv2d(in_channels=color_channel, out_channels=mr, kernel_size=2 * k_stride, stride=k_stride, padding=k_stride) self.deconv0 = torch.nn.ConvTranspose2d(in_channels=mr, out_channels=color_channel, kernel_size=2 * k_stride, stride= k_stride, padding=k_stride) self.conv1_1 = torch.nn.Conv2d(in_channels=color_channel, out_channels=64, kernel_size=11, stride=1, padding=5) self.conv1_2 = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, stride=1, padding=0) self.conv1_3 = torch.nn.Conv2d(in_channels=32, out_channels= color_channel, kernel_size=7, stride=1, padding=3) self.conv2_1 = torch.nn.Conv2d(in_channels=color_channel, out_channels=64, kernel_size=11, stride=1, padding=5) self.conv2_2 = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, stride=1, padding=0) self.conv2_3 = torch.nn.Conv2d(in_channels=32, out_channels= color_channel, kernel_size=7, stride=1, padding=3) self.conv3_1 = torch.nn.Conv2d(in_channels=color_channel, out_channels=64, kernel_size=11, stride=1, padding=5) self.conv3_2 = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, stride=1, padding=0) self.conv3_3 = torch.nn.Conv2d(in_channels=32, out_channels= color_channel, kernel_size=7, stride=1, padding=3) def forward(self, x): measurement = self.conv0(x) y0 = self.deconv0(measurement) y = torch.nn.functional.relu(self.conv1_1(y0)) y = torch.nn.functional.relu(self.conv1_2(y)) y1 = y0 + self.conv1_3(y) y = torch.nn.functional.relu(self.conv2_1(y1)) y = torch.nn.functional.relu(self.conv2_2(y)) y2 = y1 + self.conv2_3(y) y = torch.nn.functional.relu(self.conv3_1(y2)) y = torch.nn.functional.relu(self.conv3_2(y)) y = y2 + self.conv3_3(y) return measurement, y def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 12 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 43200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 3600 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 3600 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 43200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 3 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (12, 3, 40, 40), (4800, 1600, 40, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (12, 3, 40, 40), (4800, 1600, 40, 1)) assert_size_stride(primals_5, (3,), (1,)) assert_size_stride(primals_6, (64, 3, 11, 11), (363, 121, 11, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (3, 32, 7, 7), (1568, 49, 7, 1)) 
assert_size_stride(primals_11, (3,), (1,)) assert_size_stride(primals_12, (64, 3, 11, 11), (363, 121, 11, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_15, (32,), (1,)) assert_size_stride(primals_16, (3, 32, 7, 7), (1568, 49, 7, 1)) assert_size_stride(primals_17, (3,), (1,)) assert_size_stride(primals_18, (64, 3, 11, 11), (363, 121, 11, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_21, (32,), (1,)) assert_size_stride(primals_22, (3, 32, 7, 7), (1568, 49, 7, 1)) assert_size_stride(primals_23, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(20, 20), padding=(20, 20), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 12, 4, 4), (192, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(768)](buf1, primals_2, 768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(20, 20), padding=(20, 20), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 3, 60, 60), (10800, 3600, 60, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(43200)](buf3, primals_5, 43200, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(5, 5), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 60, 60), (230400, 3600, 60, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(921600)](buf5, primals_7, 921600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 60, 60), (115200, 3600, 60, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_3[grid(460800)](buf7, primals_9, 460800, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 3, 60, 60), (10800, 3600, 60, 1)) buf9 = buf8 del buf8 triton_poi_fused_add_convolution_4[grid(43200)](buf9, buf3, primals_11, 43200, XBLOCK=512, num_warps=4, num_stages=1) del primals_11 buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(5, 5), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 60, 60), (230400, 3600, 60, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_2[grid(921600)](buf11, primals_13, 921600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 32, 60, 60), (115200, 3600, 60, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_3[grid(460800)](buf13, primals_15, 460800, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), 
groups=1, bias=None) assert_size_stride(buf14, (4, 3, 60, 60), (10800, 3600, 60, 1)) buf15 = buf14 del buf14 triton_poi_fused_add_convolution_4[grid(43200)](buf15, buf9, primals_17, 43200, XBLOCK=512, num_warps=4, num_stages=1) del primals_17 buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1), padding=(5, 5), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 60, 60), (230400, 3600, 60, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_2[grid(921600)](buf17, primals_19, 921600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 32, 60, 60), (115200, 3600, 60, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_3[grid(460800)](buf19, primals_21, 460800, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 3, 60, 60), (10800, 3600, 60, 1)) buf21 = buf20 del buf20 triton_poi_fused_add_convolution_4[grid(43200)](buf21, buf15, primals_23, 43200, XBLOCK=512, num_warps=4, num_stages=1) del primals_23 return (buf1, buf21, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19) class CSNetNew(torch.nn.Module): def __init__(self): super(CSNetNew, self).__init__() k_stride = 20 color_channel = 3 mr = 12 self.conv0 = torch.nn.Conv2d(in_channels=color_channel, out_channels=mr, kernel_size=2 * k_stride, stride=k_stride, padding=k_stride) self.deconv0 = torch.nn.ConvTranspose2d(in_channels=mr, out_channels=color_channel, kernel_size=2 * k_stride, stride= k_stride, padding=k_stride) self.conv1_1 = torch.nn.Conv2d(in_channels=color_channel, out_channels=64, kernel_size=11, stride=1, padding=5) self.conv1_2 = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, stride=1, padding=0) self.conv1_3 = torch.nn.Conv2d(in_channels=32, out_channels= color_channel, kernel_size=7, stride=1, padding=3) self.conv2_1 = torch.nn.Conv2d(in_channels=color_channel, out_channels=64, kernel_size=11, stride=1, padding=5) self.conv2_2 = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, stride=1, padding=0) self.conv2_3 = torch.nn.Conv2d(in_channels=32, out_channels= color_channel, kernel_size=7, stride=1, padding=3) self.conv3_1 = torch.nn.Conv2d(in_channels=color_channel, out_channels=64, kernel_size=11, stride=1, padding=5) self.conv3_2 = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, stride=1, padding=0) self.conv3_3 = torch.nn.Conv2d(in_channels=32, out_channels= color_channel, kernel_size=7, stride=1, padding=3) def forward(self, input_0): primals_1 = self.conv0.weight primals_2 = self.conv0.bias primals_4 = self.deconv0.weight primals_5 = self.deconv0.bias primals_6 = self.conv1_1.weight primals_7 = self.conv1_1.bias primals_8 = self.conv1_2.weight primals_9 = self.conv1_2.bias primals_10 = self.conv1_3.weight primals_11 = self.conv1_3.bias primals_12 = self.conv2_1.weight primals_13 = self.conv2_1.bias primals_14 = self.conv2_2.weight primals_15 = self.conv2_2.bias primals_16 = self.conv2_3.weight primals_17 = self.conv2_3.bias primals_18 = 
self.conv3_1.weight primals_19 = self.conv3_1.bias primals_20 = self.conv3_2.weight primals_21 = self.conv3_2.bias primals_22 = self.conv3_3.weight primals_23 = self.conv3_3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0], output[1]
jiang-du/Multi-rate-VCS
CSNet
false
12,619
[ "MIT" ]
0
18457a7e0be76cad8b78b7dee32f8f6704d2f7e0
https://github.com/jiang-du/Multi-rate-VCS/tree/18457a7e0be76cad8b78b7dee32f8f6704d2f7e0
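The CSNet record above shows the most common inductor pattern in this corpus: the convolution itself is dispatched to cuDNN via extern_kernels.convolution with bias=None, and a small pointwise Triton kernel (triton_poi_fused_convolution_0, xnumel = 768 = 4*12*4*4) then adds the bias in place. A minimal sketch of the equivalent eager computation, using the shapes from the assert_size_stride guards (the random tensors are placeholders, not data from the record):

import torch
import torch.nn.functional as F

x = torch.randn(4, 3, 64, 64)      # shaped like primals_3
w = torch.randn(12, 3, 40, 40)     # shaped like the conv0 weight
b = torch.randn(12)                # shaped like the conv0 bias

ref = F.conv2d(x, w, bias=b, stride=20, padding=20)
out = F.conv2d(x, w, bias=None, stride=20, padding=20)  # the extern kernel
out = out + b.view(1, -1, 1, 1)    # what the fused bias kernel does in place
assert out.shape == (4, 12, 4, 4) and torch.allclose(ref, out)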
Net
import torch import torch.nn as tnn class Net(tnn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = tnn.Conv2d(3, 6, 5) self.pool = tnn.MaxPool2d(2, 2) self.conv2 = tnn.Conv2d(6, 16, 5) self.fc1 = tnn.Linear(16 * 5 * 5, 120) self.fc2 = tnn.Linear(120, 84) self.fc3 = tnn.Linear(84, 10) def forward(self, x): x = self.pool(self.conv1(x)) x = self.pool(self.conv2(x)) x = x.view(-1, 16 * 5 * 5) x = self.fc1(x) x = self.fc2(x) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as tnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = 
triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(18816)](buf1, primals_2, 18816, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400 ), 0), alpha=1, beta=1, out=buf8) del primals_7 buf9 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), alpha=1, beta=1, out=buf9) del primals_9 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf9, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf10) del primals_11 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf8, buf9, primals_10, primals_8, primals_6) class NetNew(tnn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = tnn.Conv2d(3, 6, 5) self.pool = tnn.MaxPool2d(2, 2) self.conv2 = tnn.Conv2d(6, 16, 5) self.fc1 = tnn.Linear(16 * 5 * 5, 120) self.fc2 = tnn.Linear(120, 84) self.fc3 = tnn.Linear(84, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = 
self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
jittor-online-first/jittor
Net
false
12,620
[ "Apache-2.0" ]
0
4217359f86cbcf174fab27c3b723487a8d78b729
https://github.com/jittor-online-first/jittor/tree/4217359f86cbcf174fab27c3b723487a8d78b729
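In the Net record the notable kernels are the two max_pool2d_with_indices lowerings: besides the pooled values they emit an int8 buffer holding, for every output element, which of the four positions in its 2x2 window won, which is cheaper than the flat int64 indices eager PyTorch returns. A small sketch of the semantics with placeholder data (shapes follow the buf1/buf2 guards above):

import torch
import torch.nn.functional as F

x = torch.randn(4, 6, 28, 28)      # shaped like buf1: conv1 output plus bias
out, idx = F.max_pool2d(x, 2, stride=2, return_indices=True)
assert out.shape == (4, 6, 14, 14)  # matches buf2
# The Triton kernel stores the same argmax information as int8 values in
# {0, 1, 2, 3} -- the winner's offset inside each 2x2 window -- rather than
# idx's flat indices into the 28x28 input plane.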
BahdanauAttention
import torch import torch.nn as nn import torch.nn.functional as F class BahdanauAttention(nn.Module): """ Class performs Additive Bahdanau Attention. Source: https://arxiv.org/pdf/1409.0473.pdf """ def __init__(self, num_features, hidden_dim, output_dim=1): super(BahdanauAttention, self).__init__() self.num_features = num_features self.hidden_dim = hidden_dim self.output_dim = output_dim self.W_a = nn.Linear(self.num_features, self.hidden_dim) self.U_a = nn.Linear(self.hidden_dim, self.hidden_dim) self.v_a = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, features, decoder_hidden): """ Arguments: ---------- - features - features returned from Encoder - decoder_hidden - hidden state output from Decoder Returns: --------- - context - context vector of shape (batch, num_features) - atten_weight - attention probabilities expressing the relevance of each feature """ decoder_hidden = decoder_hidden.unsqueeze(1) atten_1 = self.W_a(features) atten_2 = self.U_a(decoder_hidden) atten_tan = torch.tanh(atten_1 + atten_2) atten_score = self.v_a(atten_tan) atten_weight = F.softmax(atten_score, dim=1) context = torch.sum(atten_weight * features, dim=1) atten_weight = atten_weight.squeeze(dim=2) return context, atten_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'num_features': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x3 = xindex // 256 x5 = xindex % 64 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = 
tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_3, buf1, primals_6, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 del primals_6 buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0 ) del buf0 triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_mul_sum_3[grid(256)](buf6, primals_4, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf7, buf6, primals_4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2, buf6, primals_7 class BahdanauAttentionNew(nn.Module): """ Class performs Additive Bahdanau Attention. Source: https://arxiv.org/pdf/1409.0473.pdf """ def __init__(self, num_features, hidden_dim, output_dim=1): super(BahdanauAttentionNew, self).__init__() self.num_features = num_features self.hidden_dim = hidden_dim self.output_dim = output_dim self.W_a = nn.Linear(self.num_features, self.hidden_dim) self.U_a = nn.Linear(self.hidden_dim, self.hidden_dim) self.v_a = nn.Linear(self.hidden_dim, self.output_dim) def forward(self, input_0, input_1): primals_2 = self.W_a.weight primals_3 = self.W_a.bias primals_5 = self.U_a.weight primals_6 = self.U_a.bias primals_7 = self.v_a.weight primals_8 = self.v_a.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
joao-d-oliveira/CV-Image_Captioning
BahdanauAttention
false
12,621
[ "MIT" ]
0
76186c326e4fc44a60da401f4ec71176cba42e87
https://github.com/joao-d-oliveira/CV-Image_Captioning/tree/76186c326e4fc44a60da401f4ec71176cba42e87
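The softmax over the attention scores in this record is lowered into two Triton kernels: _softmax_1 subtracts the running maximum along dim=1 and exponentiates (the subtraction is purely for numerical stability), and _softmax_2 normalises by the sum. A sketch of that decomposition in eager code, using the (4, 4, 4, 4, 1) score shape implied by buf4's reinterpretation above (random scores as placeholders):

import torch

scores = torch.randn(4, 4, 4, 4, 1)                    # like atten_score
shifted = scores - scores.amax(dim=1, keepdim=True)    # kernel _softmax_1:
num = shifted.exp()                                    # max-shift, then exp
weights = num / num.sum(dim=1, keepdim=True)           # kernel _softmax_2
assert torch.allclose(weights, torch.softmax(scores, dim=1))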
RegressionModel
import torch import torch.nn as nn class RegressionModel(nn.Module): def __init__(self, num_features_in, num_anchors=1, feature_size=256): super(RegressionModel, self).__init__() self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1) self.act1 = nn.ReLU() self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act2 = nn.ReLU() self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act3 = nn.ReLU() self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act4 = nn.ReLU() self.output = nn.Conv2d(feature_size, num_anchors * 8, kernel_size= 3, padding=1) def forward(self, x): out = self.conv1(x) out = self.act1(out) out = self.conv2(out) out = self.act2(out) out = self.conv3(out) out = self.act3(out) out = self.conv4(out) out = self.act4(out) out = self.output(out) out = out.permute(0, 2, 3, 1) return out.contiguous().view(out.shape[0], -1, 8) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features_in': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 8 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 128 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 8 * y3), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (8, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_7, 16384, 
XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_0[grid(16384)](buf7, primals_9, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 8, 4, 4), (128, 16, 4, 1)) buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_poi_fused_clone_1[grid(64, 8)](buf8, primals_11, buf9, 64, 8, XBLOCK=8, YBLOCK=32, num_warps=4, num_stages=1) del buf8 del primals_11 return (reinterpret_tensor(buf9, (4, 16, 8), (128, 8, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7) class RegressionModelNew(nn.Module): def __init__(self, num_features_in, num_anchors=1, feature_size=256): super(RegressionModelNew, self).__init__() self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1) self.act1 = nn.ReLU() self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act2 = nn.ReLU() self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act3 = nn.ReLU() self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act4 = nn.ReLU() self.output = nn.Conv2d(feature_size, num_anchors * 8, kernel_size= 3, padding=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.output.weight primals_11 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
fmrdev/ctracker
RegressionModel
false
12,622
[ "Apache-2.0" ]
0
6f5a88d569d0132a9f844cd1e55e60032d32bcba
https://github.com/fmrdev/ctracker/tree/6f5a88d569d0132a9f844cd1e55e60032d32bcba
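The only kernel in the RegressionModel record that is not a plain fused bias+ReLU is triton_poi_fused_clone_1: it folds the final bias add into the NCHW -> NHWC transpose that out.permute(0, 2, 3, 1).contiguous() requires, so the trailing .view(n, -1, 8) becomes a free reinterpretation. A sketch of what that clone kernel computes, with placeholder tensors shaped like buf8 and primals_11:

import torch

n, h, w = 4, 4, 4
conv_out = torch.randn(n, 8, h, w)   # like buf8: conv output, bias not added
bias = torch.randn(8)                # like primals_11
nhwc = (conv_out + bias.view(1, -1, 1, 1)).permute(0, 2, 3, 1).contiguous()
boxes = nhwc.view(n, h * w, 8)       # the reinterpret_tensor step in `call`
assert boxes.shape == (4, 16, 8)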
ModulatedToRGB
import torch import torch.nn.functional as F from copy import deepcopy import torch.nn as nn from functools import partial from torch.nn.init import _calculate_correct_fan def upsample(in_tens, out_H=64): """Upsamples the input to the given size. Args: in_tens (Tensor): Tensor with shape [N, C, H, W]. out_H (int, optional): Output spatial size. Defaults to 64. Returns: Tensor: Output Tensor. """ in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module def _make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. 
mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModule(nn.Module): """Equalized LR Linear Module with Activation Layer. This module is modified from ``EqualizedLRLinearModule`` defined in PGGAN. The major feature updated in this module is adding support for activation layers used in StyleGAN2. Args: equalized_lr_cfg (dict | None, optional): Config for equalized lr. Defaults to dict(gain=1., lr_mul=1.). bias (bool, optional): Whether to use a bias term. Defaults to True. bias_init (float, optional): The value for bias initialization. Defaults to ``0.``. act_cfg (dict | None, optional): Config for activation layer. Defaults to None. """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super().__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, ...). Returns: Tensor: Output feature map.
""" if x.ndim >= 3: x = x.reshape(x.size(0), -1) x = self.linear(x) if self.with_activation and self.act_type == 'fused_bias': x = self.activate(x, self.bias * self.lr_mul) elif self.bias is not None and self.with_activation: x = self.activate(x + self.bias * self.lr_mul) elif self.bias is not None: x = x + self.bias * self.lr_mul elif self.with_activation: x = self.activate(x) return x class Blur(nn.Module): """Blur module. This module is adopted rightly after upsampling operation in StyleGAN2. Args: kernel (Array): Blur kernel/filter used in UpFIRDn. pad (list[int]): Padding for features. upsample_factor (int, optional): Upsampling factor. Defaults to 1. """ def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = _make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, H, W). Returns: Tensor: Output feature map. """ return upfirdn2d(x, self.kernel, pad=self.pad) class ModulatedConv2d(nn.Module): """Modulated Conv2d in StyleGANv2. This module implements the modulated convolution layers proposed in StyleGAN2. Details can be found in Analyzing and Improving the Image Quality of StyleGAN, CVPR2020. Args: in_channels (int): Input channels. out_channels (int): Output channels. kernel_size (int): Kernel size, same as :obj:`nn.Con2d`. style_channels (int): Channels for the style codes. demodulate (bool, optional): Whether to adopt demodulation. Defaults to True. upsample (bool, optional): Whether to adopt upsampling in features. Defaults to False. downsample (bool, optional): Whether to adopt downsampling in features. Defaults to False. blur_kernel (list[int], optional): Blurry kernel. Defaults to [1, 3, 3, 1]. equalized_lr_cfg (dict | None, optional): Configs for equalized lr. Defaults to dict(mode='fan_in', lr_mul=1., gain=1.). style_mod_cfg (dict, optional): Configs for style modulation module. Defaults to dict(bias_init=1.). style_bias (float, optional): Bias value for style code. Defaults to 0.. eps (float, optional): Epsilon value to avoid computation error. Defaults to 1e-8. 
""" def __init__(self, in_channels, out_channels, kernel_size, style_channels, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1], equalized_lr_cfg=dict(mode='fan_in', lr_mul=1.0, gain=1.0), style_mod_cfg=dict(bias_init=1.0), style_bias=0.0, eps=1e-08): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.style_channels = style_channels self.demodulate = demodulate assert isinstance(self.kernel_size, int) and (self.kernel_size >= 1 and self.kernel_size % 2 == 1) self.upsample = upsample self.downsample = downsample self.style_bias = style_bias self.eps = eps style_mod_cfg = dict() if style_mod_cfg is None else style_mod_cfg self.style_modulation = EqualLinearActModule(style_channels, in_channels, **style_mod_cfg) lr_mul_ = 1.0 if equalized_lr_cfg is not None: lr_mul_ = equalized_lr_cfg.get('lr_mul', 1.0) self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size).div_(lr_mul_)) if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, (pad0, pad1), upsample_factor=factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) if equalized_lr_cfg is not None: equalized_lr(self, **equalized_lr_cfg) self.padding = kernel_size // 2 def forward(self, x, style): n, c, h, w = x.shape style = self.style_modulation(style).view(n, 1, c, 1, 1 ) + self.style_bias weight = self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(n, self.out_channels, 1, 1, 1) weight = weight.view(n * self.out_channels, c, self.kernel_size, self.kernel_size) if self.upsample: x = x.reshape(1, n * c, h, w) weight = weight.view(n, self.out_channels, c, self.kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(n * c, self. out_channels, self.kernel_size, self.kernel_size) x = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=n) x = x.reshape(n, self.out_channels, *x.shape[-2:]) x = self.blur(x) elif self.downsample: x = self.blur(x) x = x.view(1, n * self.in_channels, *x.shape[-2:]) x = F.conv2d(x, weight, stride=2, padding=0, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) else: x = x.view(1, n * c, h, w) x = F.conv2d(x, weight, stride=1, padding=self.padding, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) return x class UpsampleUpFIRDn(nn.Module): """UpFIRDn for Upsampling. This module is used in the ``to_rgb`` layers in StyleGAN2 for upsampling the images. Args: kernel (Array): Blur kernel/filter used in UpFIRDn. factor (int, optional): Upsampling factor. Defaults to 2. """ def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = _make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, H, W). Returns: Tensor: Output feature map. """ out = upfirdn2d(x, self.kernel, up=self.factor, down=1, pad=self.pad) return out class ModulatedToRGB(nn.Module): """To RGB layer. This module is designed to output image tensor in StyleGAN2. Args: in_channels (int): Input channels. style_channels (int): Channels for the style codes. 
out_channels (int, optional): Output channels. Defaults to 3. upsample (bool, optional): Whether to adopt upsampling in features. Defaults to True. blur_kernel (list[int], optional): Blur kernel. Defaults to [1, 3, 3, 1]. style_mod_cfg (dict, optional): Configs for style modulation module. Defaults to dict(bias_init=1.). style_bias (float, optional): Bias value for style code. Defaults to 0.0. """ def __init__(self, in_channels, style_channels, out_channels=3, upsample=True, blur_kernel=[1, 3, 3, 1], style_mod_cfg=dict( bias_init=1.0), style_bias=0.0): super().__init__() if upsample: self.upsample = UpsampleUpFIRDn(blur_kernel) self.conv = ModulatedConv2d(in_channels, out_channels=out_channels, kernel_size=1, style_channels=style_channels, demodulate=False, style_mod_cfg=style_mod_cfg, style_bias=style_bias) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, x, style, skip=None): """Forward Function. Args: x (Tensor): Input features with shape of (N, C, H, W). style (Tensor): Style latent with shape of (N, C). skip (Tensor, optional): Tensor for skip link. Defaults to None. Returns: Tensor: Output features with shape of (N, C, H, W). """ out = self.conv(x, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'style_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F from copy import deepcopy import torch.nn as nn from functools import partial from torch.nn.init import _calculate_correct_fan assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_mul_sqrt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = 0.0 tmp7 = tmp5 + tmp6 tmp8 = tmp0 * tmp7 tl.store(out_ptr0 + x4, tmp8, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. float32) get_raw_stream(0) triton_poi_fused_mul_sqrt_0[grid(12)](primals_1, buf0, 12, XBLOCK= 16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sqrt_1[grid(16)](primals_4, buf1, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf1, (4, 4), (1, 4 ), 0), out=buf2) buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_add_mul_2[grid(48)](buf0, buf2, primals_5, buf3, 48, XBLOCK=64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return (buf5, buf0, buf1, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_2, (1, 16, 4, 4), (256, 16, 4, 1), 0)) def upsample(in_tens, out_H=64): """Upsamples the input to the given size. Args: in_tens (Tensor): Tensor with shape [N, C, H, W]. out_H (int, optional): Output spatial size. Defaults to 64. Returns: Tensor: Output Tensor. """ in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module def _make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. 
""" weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModule(nn.Module): """Equalized LR Linear Module with Activation Layer. This module is modified from ``EqualizedLRLinearModule`` defined in PGGAN. The major features updated in this module is adding support for activation layers used in StyleGAN2. Args: equalized_lr_cfg (dict | None, optional): Config for equalized lr. Defaults to dict(gain=1., lr_mul=1.). bias (bool, optional): Whether to use bias item. Defaults to True. bias_init (float, optional): The value for bias initialization. Defaults to ``0.``. act_cfg (dict | None, optional): Config for activation layer. Defaults to None. 
""" def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super().__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, ...). Returns: Tensor: Output feature map. """ if x.ndim >= 3: x = x.reshape(x.size(0), -1) x = self.linear(x) if self.with_activation and self.act_type == 'fused_bias': x = self.activate(x, self.bias * self.lr_mul) elif self.bias is not None and self.with_activation: x = self.activate(x + self.bias * self.lr_mul) elif self.bias is not None: x = x + self.bias * self.lr_mul elif self.with_activation: x = self.activate(x) return x class Blur(nn.Module): """Blur module. This module is adopted rightly after upsampling operation in StyleGAN2. Args: kernel (Array): Blur kernel/filter used in UpFIRDn. pad (list[int]): Padding for features. upsample_factor (int, optional): Upsampling factor. Defaults to 1. """ def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = _make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, H, W). Returns: Tensor: Output feature map. """ return upfirdn2d(x, self.kernel, pad=self.pad) class ModulatedConv2d(nn.Module): """Modulated Conv2d in StyleGANv2. This module implements the modulated convolution layers proposed in StyleGAN2. Details can be found in Analyzing and Improving the Image Quality of StyleGAN, CVPR2020. Args: in_channels (int): Input channels. out_channels (int): Output channels. kernel_size (int): Kernel size, same as :obj:`nn.Con2d`. style_channels (int): Channels for the style codes. demodulate (bool, optional): Whether to adopt demodulation. Defaults to True. upsample (bool, optional): Whether to adopt upsampling in features. Defaults to False. downsample (bool, optional): Whether to adopt downsampling in features. Defaults to False. blur_kernel (list[int], optional): Blurry kernel. Defaults to [1, 3, 3, 1]. equalized_lr_cfg (dict | None, optional): Configs for equalized lr. Defaults to dict(mode='fan_in', lr_mul=1., gain=1.). style_mod_cfg (dict, optional): Configs for style modulation module. Defaults to dict(bias_init=1.). style_bias (float, optional): Bias value for style code. Defaults to 0.. eps (float, optional): Epsilon value to avoid computation error. Defaults to 1e-8. 
""" def __init__(self, in_channels, out_channels, kernel_size, style_channels, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1], equalized_lr_cfg=dict(mode='fan_in', lr_mul=1.0, gain=1.0), style_mod_cfg=dict(bias_init=1.0), style_bias=0.0, eps=1e-08): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.style_channels = style_channels self.demodulate = demodulate assert isinstance(self.kernel_size, int) and (self.kernel_size >= 1 and self.kernel_size % 2 == 1) self.upsample = upsample self.downsample = downsample self.style_bias = style_bias self.eps = eps style_mod_cfg = dict() if style_mod_cfg is None else style_mod_cfg self.style_modulation = EqualLinearActModule(style_channels, in_channels, **style_mod_cfg) lr_mul_ = 1.0 if equalized_lr_cfg is not None: lr_mul_ = equalized_lr_cfg.get('lr_mul', 1.0) self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size).div_(lr_mul_)) if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, (pad0, pad1), upsample_factor=factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) if equalized_lr_cfg is not None: equalized_lr(self, **equalized_lr_cfg) self.padding = kernel_size // 2 def forward(self, x, style): n, c, h, w = x.shape style = self.style_modulation(style).view(n, 1, c, 1, 1 ) + self.style_bias weight = self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(n, self.out_channels, 1, 1, 1) weight = weight.view(n * self.out_channels, c, self.kernel_size, self.kernel_size) if self.upsample: x = x.reshape(1, n * c, h, w) weight = weight.view(n, self.out_channels, c, self.kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(n * c, self. out_channels, self.kernel_size, self.kernel_size) x = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=n) x = x.reshape(n, self.out_channels, *x.shape[-2:]) x = self.blur(x) elif self.downsample: x = self.blur(x) x = x.view(1, n * self.in_channels, *x.shape[-2:]) x = F.conv2d(x, weight, stride=2, padding=0, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) else: x = x.view(1, n * c, h, w) x = F.conv2d(x, weight, stride=1, padding=self.padding, groups=n) x = x.view(n, self.out_channels, *x.shape[-2:]) return x class UpsampleUpFIRDn(nn.Module): """UpFIRDn for Upsampling. This module is used in the ``to_rgb`` layers in StyleGAN2 for upsampling the images. Args: kernel (Array): Blur kernel/filter used in UpFIRDn. factor (int, optional): Upsampling factor. Defaults to 2. """ def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = _make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, x): """Forward function. Args: x (Tensor): Input feature map with shape of (N, C, H, W). Returns: Tensor: Output feature map. """ out = upfirdn2d(x, self.kernel, up=self.factor, down=1, pad=self.pad) return out class ModulatedToRGBNew(nn.Module): """To RGB layer. This module is designed to output image tensor in StyleGAN2. Args: in_channels (int): Input channels. style_channels (int): Channels for the style codes. 
out_channels (int, optional): Output channels. Defaults to 3. upsample (bool, optional): Whether to adopt upsampling in features. Defaults to False. blur_kernel (list[int], optional): Blurry kernel. Defaults to [1, 3, 3, 1]. style_mod_cfg (dict, optional): Configs for style modulation module. Defaults to dict(bias_init=1.). style_bias (float, optional): Bias value for style code. Defaults to 0.. """ def __init__(self, in_channels, style_channels, out_channels=3, upsample=True, blur_kernel=[1, 3, 3, 1], style_mod_cfg=dict( bias_init=1.0), style_bias=0.0): super().__init__() if upsample: self.upsample = UpsampleUpFIRDn(blur_kernel) self.conv = ModulatedConv2d(in_channels, out_channels=out_channels, kernel_size=1, style_channels=style_channels, demodulate=False, style_mod_cfg=style_mod_cfg, style_bias=style_bias) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_1 = self.conv.weight_orig primals_5 = self.conv.style_modulation.bias primals_3 = self.conv.style_modulation.linear.weight_orig primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
jiangwenj02/mmgeneration
ModulatedToRGB
false
12,623
[ "Apache-2.0" ]
0
da9ad377ae19260467fc332ddb88f505c38a915a
https://github.com/jiangwenj02/mmgeneration/tree/da9ad377ae19260467fc332ddb88f505c38a915a
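For orientation, a minimal shape-check sketch for the ModulatedToRGB record above. This is a hedged sketch only: it assumes the record's ModulatedToRGB class (or the original one from the jiangwenj02/mmgeneration repo) is importable in scope, and the channel counts and style dimension below are illustrative, not taken from the record.

import torch

# Assumption: ModulatedToRGB from the record's repo is defined/imported in scope.
to_rgb = ModulatedToRGB(in_channels=8, style_channels=512)
feat = torch.randn(2, 8, 16, 16)   # (N, C, H, W) feature map
style = torch.randn(2, 512)        # (N, style_channels) style codes
rgb = to_rgb(feat, style)
print(rgb.shape)                   # expected: torch.Size([2, 3, 16, 16])

The 1x1 modulated convolution with demodulate=False preserves the spatial size, so only the channel count changes to the default out_channels=3 (plus the (1, 3, 1, 1) bias).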
ClassificationModel
import torch import torch.nn as nn class ClassificationModel(nn.Module): def __init__(self, num_features_in, num_anchors=1, num_classes=80, prior=0.01, feature_size=256): super(ClassificationModel, self).__init__() self.num_classes = num_classes self.num_anchors = num_anchors self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1) self.act1 = nn.ReLU() self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act2 = nn.ReLU() self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act3 = nn.ReLU() self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act4 = nn.ReLU() self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1) self.output_act = nn.Sigmoid() def forward(self, x): out = self.conv1(x) out = self.act1(out) out = self.conv2(out) out = self.act2(out) out = self.conv3(out) out = self.act3(out) out = self.conv4(out) out = self.act4(out) out = self.output(out) out = self.output_act(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features_in': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 5120 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 80 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (80, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (80,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_0[grid(16384)](buf7, 
primals_9, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 80, 4, 4), (1280, 16, 4, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_sigmoid_1[grid(5120)](buf9, primals_11, 5120, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf9) class ClassificationModelNew(nn.Module): def __init__(self, num_features_in, num_anchors=1, num_classes=80, prior=0.01, feature_size=256): super(ClassificationModelNew, self).__init__() self.num_classes = num_classes self.num_anchors = num_anchors self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1) self.act1 = nn.ReLU() self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act2 = nn.ReLU() self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act3 = nn.ReLU() self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act4 = nn.ReLU() self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1) self.output_act = nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.output.weight primals_11 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
fmrdev/ctracker
ClassificationModel
false
12,624
[ "Apache-2.0" ]
0
6f5a88d569d0132a9f844cd1e55e60032d32bcba
https://github.com/fmrdev/ctracker/tree/6f5a88d569d0132a9f844cd1e55e60032d32bcba
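A minimal usage sketch for the ClassificationModel record above, mirroring its get_inputs()/get_init_inputs() harness. It assumes the class definition from the record is in scope.

import torch

# Assumption: ClassificationModel from the record above is defined in scope.
head = ClassificationModel(num_features_in=4)  # defaults: 1 anchor, 80 classes
feat = torch.rand(4, 4, 4, 4)                  # (N, C, H, W) feature map
scores = head(feat)
print(scores.shape)  # torch.Size([4, 80, 4, 4]): num_anchors * num_classes sigmoid scores

All five convolutions use kernel_size=3, padding=1, stride 1, so spatial size is preserved end to end; only the channel count changes at the output layer.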
GroupNorm32
import torch import torch.nn as nn import torch.nn.functional as F class GroupNorm32(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-05): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, x): y = super().forward(x.float()) if self.swish == 1.0: y = F.silu(y) elif self.swish: y = y * F.sigmoid(y * float(self.swish)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1, 'num_channels': 4, 'swish': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 4.0 tmp29 = tmp27 * tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = tmp27 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(in_out_ptr1 + (r1 + 64 * x0), tmp31, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = buf4 del buf4 get_raw_stream(0) triton_per_fused_mul_native_group_norm_sigmoid_0[grid(4)](buf3, buf5, primals_1, primals_2, primals_3, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) return buf5, primals_1, primals_2, primals_3, buf0, buf3 class GroupNorm32New(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-05): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
johnpaulbin/glide-text2im
GroupNorm32
false
12,625
[ "MIT" ]
0
4897050c4c540316dfb1ec7e6ff95698bcb20487
https://github.com/johnpaulbin/glide-text2im/tree/4897050c4c540316dfb1ec7e6ff95698bcb20487
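The GroupNorm32 record above only wraps nn.GroupNorm with an optional (scaled) swish, so its non-unit swish branch can be reproduced with a fully self-contained reference, shown below. Note that F.sigmoid in the record is the deprecated alias of torch.sigmoid.

import torch
import torch.nn as nn

# Self-contained reference for GroupNorm32 with swish=4 (the harness value):
# y = GroupNorm(x.float()); y = y * sigmoid(4 * y)
gn = nn.GroupNorm(num_groups=1, num_channels=4, eps=1e-05)
x = torch.rand(4, 4, 4, 4)
y = gn(x.float())
y = y * torch.sigmoid(y * 4.0)
print(y.shape)  # torch.Size([4, 4, 4, 4]); shape is unchanged by normalization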
TransformerEncoderLayer
import math import torch from torch import nn import torch.nn.functional as F def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, dropout=0): super(TransformerFFN, self).__init__() self.dropout = nn.Dropout(p=dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.dropout(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerEncoderLayer(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, dropout= relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) def forward(self, tensor, mask): tensor = tensor + self.attention(tensor, mask=mask) tensor = _normalize(tensor, self.norm1) tensor = tensor + self.ffn(tensor) tensor = _normalize(tensor, self.norm2) tensor *= mask.unsqueeze(-1).float() return tensor def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, 
XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_mul_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) 
assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_7, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_5, buf5, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) triton_poi_fused_repeat_1[grid(64)](primals_8, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0) del buf2 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0) del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused_native_layer_norm_5[grid(16)](primals_1, buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0) del buf17 buf24 = empty_strided_cuda((4, 4, 4), 
(16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf18, primals_14, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_10[grid(64)](buf20, buf21, buf22, primals_17, primals_18, primals_8, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_18 return (buf23, primals_1, primals_8, primals_11, primals_17, buf7, buf10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor( buf20, (16, 4), (4, 1), 0), primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)) def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, dropout=0): super(TransformerFFN, self).__init__() self.dropout = nn.Dropout(p=dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) 
nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.dropout(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerEncoderLayerNew(nn.Module): def __init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, dropout= relu_dropout) self.norm2 = nn.LayerNorm(embedding_size) def forward(self, input_0, input_1): primals_2 = self.attention.q_lin.weight primals_3 = self.attention.q_lin.bias primals_4 = self.attention.k_lin.weight primals_5 = self.attention.k_lin.bias primals_6 = self.attention.v_lin.weight primals_7 = self.attention.v_lin.bias primals_8 = self.attention.out_lin.weight primals_10 = self.attention.out_lin.bias primals_11 = self.norm1.weight primals_12 = self.norm1.bias primals_9 = self.ffn.lin1.weight primals_14 = self.ffn.lin1.bias primals_13 = self.ffn.lin2.weight primals_16 = self.ffn.lin2.bias primals_17 = self.norm2.weight primals_18 = self.norm2.bias primals_1 = input_0 primals_15 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
jinjiren/ParlAI
TransformerEncoderLayer
false
12,626
[ "MIT" ]
0
40799aeee69f2a0bb25a1341bb8da0c44861268e
https://github.com/jinjiren/ParlAI/tree/40799aeee69f2a0bb25a1341bb8da0c44861268e
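A minimal smoke test for the TransformerEncoderLayer record above, assuming the record's class definition is in scope. The harness's torch.rand mask is almost never exactly zero, so an all-ones mask is used here to make the "attend everywhere" intent explicit.

import torch

# Assumption: TransformerEncoderLayer from the record above is in scope.
layer = TransformerEncoderLayer(n_heads=4, embedding_size=4, ffn_size=4)
tokens = torch.rand(4, 4, 4)  # (batch, seq_len, embedding_size)
mask = torch.ones(4, 4)       # (batch, seq_len); 0 marks padded positions
out = layer(tokens, mask)
print(out.shape)              # torch.Size([4, 4, 4])

Positions where mask == 0 are filled with -1e20 before the softmax, and the layer output is also multiplied by the mask, so padded positions come out as exact zeros.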
ModulatedConv2d
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel 
= out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = nn.Identity() self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mul_pow_rsqrt_sum_0[grid(16)](buf1, primals_3, primals_2, buf2, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf3, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf3, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_2, primals_3, buf1, reinterpret_tensor(buf2, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, 
pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = nn.Identity() self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_3 = self.weight primals_1 = input_0 
primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
johnberg1/psp_s
ModulatedConv2d
false
12,627
[ "Apache-2.0", "BSD-2-Clause", "MIT" ]
0
717f4c448a4e7537cf4b74067d454c7644609ca3
https://github.com/johnberg1/psp_s/tree/717f4c448a4e7537cf4b74067d454c7644609ca3
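A minimal shape check for the ModulatedConv2d record above, assuming the class definition is in scope. Two details worth noting: this variant sets self.modulation = nn.Identity(), so the style tensor must already have shape (N, in_channel); and the even kernel_size=4 with padding=kernel_size//2=2 grows each spatial dim by one, which is exactly what the compiled call() asserts.

import torch

# Assumption: ModulatedConv2d from the record above is in scope.
conv = ModulatedConv2d(in_channel=4, out_channel=4, kernel_size=4, style_dim=4)
x = torch.rand(4, 4, 4, 4)  # (N, C, H, W)
style = torch.rand(4, 4)    # (N, in_channel), passed straight through nn.Identity()
out = conv(x, style)
print(out.shape)  # torch.Size([4, 4, 5, 5]): H_out = 4 + 2*2 - 4 + 1 = 5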
GaussianKLLoss
import torch import torch.nn as nn class GaussianKLLoss(nn.Module): def __init__(self): super(GaussianKLLoss, self).__init__() def forward(self, mu1, logvar1, mu2, logvar2): numerator = logvar1.exp() + torch.pow(mu1 - mu2, 2) fraction = torch.div(numerator, logvar2.exp()) kl = 0.5 * torch.sum(logvar2 - logvar1 + fraction - 1, dim=1) return kl.mean(dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_exp_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr3 + (x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr3 + (16 + x0 + 64 * x1), xmask) tmp28 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp29 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp33 = tl.load(in_ptr3 + (32 + x0 + 64 * x1), xmask) tmp42 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp43 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp46 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp47 = tl.load(in_ptr3 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.exp(tmp1) tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp9 = tl_math.exp(tmp0) tmp10 = tmp8 / tmp9 tmp11 = tmp2 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp16 = tmp14 - tmp15 tmp17 = tl_math.exp(tmp15) tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp17 + tmp21 tmp23 = tl_math.exp(tmp14) tmp24 = tmp22 / tmp23 tmp25 = tmp16 + tmp24 tmp26 = tmp25 - tmp12 tmp27 = tmp13 + tmp26 tmp30 = tmp28 - tmp29 tmp31 = tl_math.exp(tmp29) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp34 tmp36 = tmp31 + tmp35 tmp37 = tl_math.exp(tmp28) tmp38 = tmp36 / tmp37 tmp39 = tmp30 + tmp38 tmp40 = tmp39 - tmp12 tmp41 = tmp27 + tmp40 tmp44 = tmp42 - tmp43 tmp45 = tl_math.exp(tmp43) tmp48 = tmp46 - tmp47 tmp49 = tmp48 * tmp48 tmp50 = tmp45 + tmp49 tmp51 = tl_math.exp(tmp42) tmp52 = tmp50 / tmp51 tmp53 = tmp44 + tmp52 tmp54 = tmp53 - tmp12 tmp55 = tmp41 + tmp54 tl.store(out_ptr0 + x2, tmp55, xmask) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + (16 + x0), xmask) tmp6 = tl.load(in_ptr0 + (32 + x0), xmask) tmp9 = tl.load(in_ptr0 + (48 + x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_add_div_exp_pow_sub_sum_0[grid(64)](arg3_1, arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mean_mul_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 return buf1, class GaussianKLLossNew(nn.Module): def __init__(self): super(GaussianKLLossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
johnson7788/Info-HCVAE
GaussianKLLoss
false
12,628
[ "Apache-2.0" ]
0
f43bf705aab3dcdc340ded3be09fb87420a48c51
https://github.com/johnson7788/Info-HCVAE/tree/f43bf705aab3dcdc340ded3be09fb87420a48c51
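The GaussianKLLoss record above is a direct transcription of the closed-form KL divergence between diagonal Gaussians, so its forward can be reproduced without the class in a fully self-contained reference:

import torch

# KL(N(mu1, exp(logvar1)) || N(mu2, exp(logvar2))), reduced as in the record:
# 0.5 * sum_dim1(logvar2 - logvar1 + (exp(logvar1) + (mu1 - mu2)^2) / exp(logvar2) - 1)
mu1, logvar1, mu2, logvar2 = (torch.rand(4, 4, 4, 4) for _ in range(4))
num = logvar1.exp() + (mu1 - mu2).pow(2)
kl = 0.5 * (logvar2 - logvar1 + num / logvar2.exp() - 1).sum(dim=1)
print(kl.mean(dim=0).shape)  # torch.Size([4, 4]) for these toy 4-D inputs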
CategoricalKLLoss
import torch import torch.nn as nn class CategoricalKLLoss(nn.Module): def __init__(self): super(CategoricalKLLoss, self).__init__() def forward(self, P, Q): log_P = P.log() log_Q = Q.log() kl = (P * (log_P - log_Q)).sum(dim=-1).sum(dim=-1) return kl.mean(dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_log_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr1 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr1 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp49 = tl.load(in_ptr1 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp57 = tl.load(in_ptr1 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp61 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp63 = tl.load(in_ptr1 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp68 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp70 = tl.load(in_ptr1 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp75 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp77 = tl.load(in_ptr1 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp83 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp85 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp89 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp91 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp96 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp98 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp103 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp105 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tl_math.log(tmp0)
    tmp3 = tl_math.log(tmp2)
    tmp4 = tmp1 - tmp3
    tmp5 = tmp0 * tmp4
    tmp7 = tl_math.log(tmp6)
    tmp9 = tl_math.log(tmp8)
    tmp10 = tmp7 - tmp9
    tmp11 = tmp6 * tmp10
    tmp12 = tmp5 + tmp11
    tmp14 = tl_math.log(tmp13)
    tmp16 = tl_math.log(tmp15)
    tmp17 = tmp14 - tmp16
    tmp18 = tmp13 * tmp17
    tmp19 = tmp12 + tmp18
    tmp21 = tl_math.log(tmp20)
    tmp23 = tl_math.log(tmp22)
    tmp24 = tmp21 - tmp23
    tmp25 = tmp20 * tmp24
    tmp26 = tmp19 + tmp25
    tmp28 = tl_math.log(tmp27)
    tmp30 = tl_math.log(tmp29)
    tmp31 = tmp28 - tmp30
    tmp32 = tmp27 * tmp31
    tmp34 = tl_math.log(tmp33)
    tmp36 = tl_math.log(tmp35)
    tmp37 = tmp34 - tmp36
    tmp38 = tmp33 * tmp37
    tmp39 = tmp32 + tmp38
    tmp41 = tl_math.log(tmp40)
    tmp43 = tl_math.log(tmp42)
    tmp44 = tmp41 - tmp43
    tmp45 = tmp40 * tmp44
    tmp46 = tmp39 + tmp45
    tmp48 = tl_math.log(tmp47)
    tmp50 = tl_math.log(tmp49)
    tmp51 = tmp48 - tmp50
    tmp52 = tmp47 * tmp51
    tmp53 = tmp46 + tmp52
    tmp54 = tmp26 + tmp53
    tmp56 = tl_math.log(tmp55)
    tmp58 = tl_math.log(tmp57)
    tmp59 = tmp56 - tmp58
    tmp60 = tmp55 * tmp59
    tmp62 = tl_math.log(tmp61)
    tmp64 = tl_math.log(tmp63)
    tmp65 = tmp62 - tmp64
    tmp66 = tmp61 * tmp65
    tmp67 = tmp60 + tmp66
    tmp69 = tl_math.log(tmp68)
    tmp71 = tl_math.log(tmp70)
    tmp72 = tmp69 - tmp71
    tmp73 = tmp68 * tmp72
    tmp74 = tmp67 + tmp73
    tmp76 = tl_math.log(tmp75)
    tmp78 = tl_math.log(tmp77)
    tmp79 = tmp76 - tmp78
    tmp80 = tmp75 * tmp79
    tmp81 = tmp74 + tmp80
    tmp82 = tmp54 + tmp81
    tmp84 = tl_math.log(tmp83)
    tmp86 = tl_math.log(tmp85)
    tmp87 = tmp84 - tmp86
    tmp88 = tmp83 * tmp87
    tmp90 = tl_math.log(tmp89)
    tmp92 = tl_math.log(tmp91)
    tmp93 = tmp90 - tmp92
    tmp94 = tmp89 * tmp93
    tmp95 = tmp88 + tmp94
    tmp97 = tl_math.log(tmp96)
    tmp99 = tl_math.log(tmp98)
    tmp100 = tmp97 - tmp99
    tmp101 = tmp96 * tmp100
    tmp102 = tmp95 + tmp101
    tmp104 = tl_math.log(tmp103)
    tmp106 = tl_math.log(tmp105)
    tmp107 = tmp104 - tmp106
    tmp108 = tmp103 * tmp107
    tmp109 = tmp102 + tmp108
    tmp110 = tmp82 + tmp109
    tl.store(out_ptr0 + x0, tmp110, xmask)


@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
    tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
    tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_log_mul_sub_sum_0[grid(16)](arg0_1, arg1_1, buf0,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mean_1[grid(4)](buf0, buf1, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del buf0
    return buf1,


class CategoricalKLLossNew(nn.Module):

    def __init__(self):
        super(CategoricalKLLossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
johnson7788/Info-HCVAE
CategoricalKLLoss
false
12,629
[ "Apache-2.0" ]
0
f43bf705aab3dcdc340ded3be09fb87420a48c51
https://github.com/johnson7788/Info-HCVAE/tree/f43bf705aab3dcdc340ded3be09fb87420a48c51
Feedback
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn


def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


class Feedback(nn.Module):

    def __init__(self, opt):
        super(Feedback, self).__init__()
        self.fc1 = nn.Linear(opt.ngh, opt.ngh)
        self.fc2 = nn.Linear(opt.ngh, opt.ngh)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.apply(weights_init)

    def forward(self, x):
        self.x1 = self.lrelu(self.fc1(x))
        h = self.lrelu(self.fc2(self.x1))
        return h


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'opt': _mock_config(ngh=4)}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_view_0(in_out_ptr0,
    in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp8 = tmp7 > tmp3
    tl.store(in_out_ptr0 + x4, tmp7, xmask)
    tl.store(out_ptr0 + x4, tmp7, xmask)
    tl.store(out_ptr1 + x4, tmp8, xmask)


@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
        (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_view_2(in_out_ptr0,
    in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp8 = tmp7 > tmp3
    tl.store(out_ptr0 + x4, tmp7, xmask)
    tl.store(out_ptr1 + x4, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_leaky_relu_backward_view_0[grid(256)](
            buf1, primals_2, buf2, buf8, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        triton_poi_fused_view_1[grid(256)](buf1, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf4 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
        del buf1
        extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (4, 4), (1,
            4), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf4
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_leaky_relu_leaky_relu_backward_view_2[grid(256)](
            buf5, primals_5, buf6, buf7, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf5
        del primals_5
    return buf6, buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf3, buf7, primals_4, buf8


def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


class FeedbackNew(nn.Module):

    def __init__(self, opt):
        super(FeedbackNew, self).__init__()
        self.fc1 = nn.Linear(opt.ngh, opt.ngh)
        self.fc2 = nn.Linear(opt.ngh, opt.ngh)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.apply(weights_init)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
IacoSimoncini/tfvaegan
Feedback
false
12,630
[ "MIT" ]
0
157b526d65d0b0d5412f4be6fed02fc7d6325827
https://github.com/IacoSimoncini/tfvaegan/tree/157b526d65d0b0d5412f4be6fed02fc7d6325827
ToRGB
from torch.autograd import Function
import math
import torch
from torch import nn
import torch.nn.functional as F


def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)
    if k.ndim == 1:
        k = k[None, :] * k[:, None]
    k /= k.sum()
    return k


def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0],
        pad[1], pad[0], pad[1]))
    return out


class UpFirDn2dBackward(Function):

    @staticmethod
    def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad,
        g_pad, in_size, out_size):
        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
        grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
            down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0,
            g_pad_y1)
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
            in_size[3])
        ctx.save_for_backward(kernel)
        pad_x0, pad_x1, pad_y0, pad_y1 = pad
        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size
        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        kernel, = ctx.saved_tensors
        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
            in_size[3], 1)
        gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
            up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
            ctx.pad_y0, ctx.pad_y1)
        gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
            ctx.out_size[0], ctx.out_size[1])
        return gradgrad_out, None, None, None, None, None, None, None, None


class UpFirDn2d(Function):

    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad
        kernel_h, kernel_w = kernel.shape
        _batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape
        input = input.reshape(-1, in_h, in_w, 1)
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = out_h, out_w
        ctx.up = up_x, up_y
        ctx.down = down_x, down_y
        ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
        ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
        out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
            down_y, pad_x0, pad_x1, pad_y0, pad_y1)
        out = out.view(-1, channel, out_h, out_w)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors
        grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
            grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
            ctx.out_size)
        return grad_input, None, None, None, None


class Blur(nn.Module):

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        kernel = make_kernel(kernel)
        if upsample_factor > 1:
            kernel = kernel * upsample_factor ** 2
        self.register_buffer('kernel', kernel)
        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)
        return out


class ModulatedConv2d(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, style_dim,
        demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
        3, 3, 1]):
        super().__init__()
        self.eps = 1e-08
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample
        if upsample:
            factor = 2
            p = len(blur_kernel) - factor - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1),
                upsample_factor=factor)
        if downsample:
            factor = 2
            p = len(blur_kernel) - factor + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.padding = kernel_size // 2
        self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
            kernel_size, kernel_size))
        self.modulation = nn.Identity()
        self.demodulate = demodulate

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
            )

    def forward(self, input, style):
        batch, in_channel, height, width = input.shape
        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style
        if self.demodulate:
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
        weight = weight.view(batch * self.out_channel, in_channel, self.
            kernel_size, self.kernel_size)
        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(batch, self.out_channel, in_channel, self.
                kernel_size, self.kernel_size)
            weight = weight.transpose(1, 2).reshape(batch * in_channel,
                self.out_channel, self.kernel_size, self.kernel_size)
            out = F.conv_transpose2d(input, weight, padding=0, stride=2,
                groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)
        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        else:
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        return out


class Upsample(nn.Module):

    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        kernel = make_kernel(kernel) * factor ** 2
        self.register_buffer('kernel', kernel)
        p = kernel.shape[0] - factor
        pad0 = (p + 1) // 2 + factor - 1
        pad1 = p // 2
        self.pad = pad0, pad1

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=
            self.pad)
        return out


class ToRGB(nn.Module):

    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[
        1, 3, 3, 1]):
        super().__init__()
        if upsample:
            self.upsample = Upsample(blur_kernel)
        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim,
            demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input, style, skip=None):
        out = self.conv(input, style)
        out = out + self.bias
        if skip is not None:
            skip = self.upsample(skip)
            out = out + skip
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'style_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex % 12
    x0 = xindex % 4
    x2 = xindex // 12
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x4, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 3
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1))
    assert_size_stride(primals_4, (1, 3, 1, 1), (3, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(48)](primals_3, primals_2, buf0, 48,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
            16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf0, (12,
            4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
            dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=4, bias=None)
        assert_size_stride(buf1, (1, 12, 4, 4), (192, 16, 4, 1))
        buf2 = reinterpret_tensor(buf1, (4, 3, 4, 4), (48, 16, 4, 1), 0)
        del buf1
        triton_poi_fused_add_1[grid(192)](buf2, primals_4, 192, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_4
    return buf2, primals_2, reinterpret_tensor(buf0, (12, 4, 1, 1), (4, 1,
        1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16,
        4, 1), 0)


def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)
    if k.ndim == 1:
        k = k[None, :] * k[:, None]
    k /= k.sum()
    return k


def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0],
        pad[1], pad[0], pad[1]))
    return out


class UpFirDn2dBackward(Function):

    @staticmethod
    def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad,
        g_pad, in_size, out_size):
        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
        grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
            down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0,
            g_pad_y1)
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
            in_size[3])
        ctx.save_for_backward(kernel)
        pad_x0, pad_x1, pad_y0, pad_y1 = pad
        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size
        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        kernel, = ctx.saved_tensors
        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
            in_size[3], 1)
        gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
            up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
            ctx.pad_y0, ctx.pad_y1)
        gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
            ctx.out_size[0], ctx.out_size[1])
        return gradgrad_out, None, None, None, None, None, None, None, None


class UpFirDn2d(Function):

    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad
        kernel_h, kernel_w = kernel.shape
        _batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape
        input = input.reshape(-1, in_h, in_w, 1)
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = out_h, out_w
        ctx.up = up_x, up_y
        ctx.down = down_x, down_y
        ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
        ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
        out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
            down_y, pad_x0, pad_x1, pad_y0, pad_y1)
        out = out.view(-1, channel, out_h, out_w)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors
        grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
            grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
            ctx.out_size)
        return grad_input, None, None, None, None


class Blur(nn.Module):

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        kernel = make_kernel(kernel)
        if upsample_factor > 1:
            kernel = kernel * upsample_factor ** 2
        self.register_buffer('kernel', kernel)
        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)
        return out


class ModulatedConv2d(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, style_dim,
        demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
        3, 3, 1]):
        super().__init__()
        self.eps = 1e-08
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample
        if upsample:
            factor = 2
            p = len(blur_kernel) - factor - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1),
                upsample_factor=factor)
        if downsample:
            factor = 2
            p = len(blur_kernel) - factor + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.padding = kernel_size // 2
        self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
            kernel_size, kernel_size))
        self.modulation = nn.Identity()
        self.demodulate = demodulate

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
            )

    def forward(self, input, style):
        batch, in_channel, height, width = input.shape
        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style
        if self.demodulate:
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
        weight = weight.view(batch * self.out_channel, in_channel, self.
            kernel_size, self.kernel_size)
        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(batch, self.out_channel, in_channel, self.
                kernel_size, self.kernel_size)
            weight = weight.transpose(1, 2).reshape(batch * in_channel,
                self.out_channel, self.kernel_size, self.kernel_size)
            out = F.conv_transpose2d(input, weight, padding=0, stride=2,
                groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)
        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        else:
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        return out


class Upsample(nn.Module):

    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        kernel = make_kernel(kernel) * factor ** 2
        self.register_buffer('kernel', kernel)
        p = kernel.shape[0] - factor
        pad0 = (p + 1) // 2 + factor - 1
        pad1 = p // 2
        self.pad = pad0, pad1

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=
            self.pad)
        return out


class ToRGBNew(nn.Module):

    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[
        1, 3, 3, 1]):
        super().__init__()
        if upsample:
            self.upsample = Upsample(blur_kernel)
        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim,
            demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input_0, input_1):
        primals_4 = self.bias
        primals_3 = self.conv.weight
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
johnberg1/psp_s
ToRGB
false
12,631
[ "Apache-2.0", "BSD-2-Clause", "MIT" ]
0
717f4c448a4e7537cf4b74067d454c7644609ca3
https://github.com/johnberg1/psp_s/tree/717f4c448a4e7537cf4b74067d454c7644609ca3
SirenLayer
import torch
import numpy as np
from torch import nn


class SirenLayer(nn.Module):

    def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False):
        super().__init__()
        self.in_f = in_f
        self.w0 = w0
        self.linear = nn.Linear(in_f, out_f)
        self.is_first = is_first
        self.is_last = is_last
        self.init_weights()

    def init_weights(self):
        b = 1 / self.in_f if self.is_first else np.sqrt(6 / self.in_f
            ) / self.w0
        with torch.no_grad():
            self.linear.weight.uniform_(-b, b)

    def forward(self, x, **kwargs):
        x = self.linear(x)
        return x if self.is_last else torch.sin(self.w0 * x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_f': 4, 'out_f': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 30.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl_math.sin(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0


class SirenLayerNew(nn.Module):

    def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False):
        super().__init__()
        self.in_f = in_f
        self.w0 = w0
        self.linear = nn.Linear(in_f, out_f)
        self.is_first = is_first
        self.is_last = is_last
        self.init_weights()

    def init_weights(self):
        b = 1 / self.in_f if self.is_first else np.sqrt(6 / self.in_f
            ) / self.w0
        with torch.no_grad():
            self.linear.weight.uniform_(-b, b)

    def forward(self, input_0):
        primals_1 = self.linear.weight
        primals_2 = self.linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
jonathanfrawley/pixel-nerf
SirenLayer
false
12,632
[ "BSD-2-Clause" ]
0
11d06decbda363d6c5188ec45091da8605da4dfd
https://github.com/jonathanfrawley/pixel-nerf/tree/11d06decbda363d6c5188ec45091da8605da4dfd
PredictionConvolutions
import torch
from torch import nn
import torch.optim
import torch.utils.data


class PredictionConvolutions(nn.Module):
    """
    Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.

    The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 24564 prior (default) boxes.
    See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.

    The class scores represent the scores of each object class in each of the 24564 bounding boxes located.
    A high score for 'background' = no object.
    """

    def __init__(self, n_classes):
        """
        :param n_classes: number of different types of objects
        """
        super(PredictionConvolutions, self).__init__()
        self.n_classes = n_classes
        n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6,
            'conv10_2': 6, 'conv11_2': 4, 'conv12_2': 4}
        self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * 4,
            kernel_size=3, padding=1)
        self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * n_classes,
            kernel_size=3, padding=1)
        self.init_conv2d()

    def init_conv2d(self):
        """
        Initialize convolution parameters.
        """
        for c in self.children():
            if isinstance(c, nn.Conv2d):
                nn.init.xavier_uniform_(c.weight)
                nn.init.constant_(c.bias, 0.0)

    def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats,
        conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats):
        batch_size = conv4_3_feats.size(0)
        l_conv4_3 = self.loc_conv4_3(conv4_3_feats)
        l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous()
        l_conv4_3 = l_conv4_3.view(batch_size, -1, 4)
        l_conv7 = self.loc_conv7(conv7_feats)
        l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous()
        l_conv7 = l_conv7.view(batch_size, -1, 4)
        l_conv8_2 = self.loc_conv8_2(conv8_2_feats)
        l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous()
        l_conv8_2 = l_conv8_2.view(batch_size, -1, 4)
        l_conv9_2 = self.loc_conv9_2(conv9_2_feats)
        l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous()
        l_conv9_2 = l_conv9_2.view(batch_size, -1, 4)
        l_conv10_2 = self.loc_conv10_2(conv10_2_feats)
        l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous()
        l_conv10_2 = l_conv10_2.view(batch_size, -1, 4)
        l_conv11_2 = self.loc_conv11_2(conv11_2_feats)
        l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous()
        l_conv11_2 = l_conv11_2.view(batch_size, -1, 4)
        l_conv12_2 = self.loc_conv12_2(conv12_2_feats)
        l_conv12_2 = l_conv12_2.permute(0, 2, 3, 1).contiguous()
        l_conv12_2 = l_conv12_2.view(batch_size, -1, 4)
        c_conv4_3 = self.cl_conv4_3(conv4_3_feats)
        c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous()
        c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes)
        c_conv7 = self.cl_conv7(conv7_feats)
        c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous()
        c_conv7 = c_conv7.view(batch_size, -1, self.n_classes)
        c_conv8_2 = self.cl_conv8_2(conv8_2_feats)
        c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous()
        c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes)
        c_conv9_2 = self.cl_conv9_2(conv9_2_feats)
        c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous()
        c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes)
        c_conv10_2 = self.cl_conv10_2(conv10_2_feats)
        c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous()
        c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes)
        c_conv11_2 = self.cl_conv11_2(conv11_2_feats)
        c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous()
        c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes)
        c_conv12_2 = self.cl_conv12_2(conv12_2_feats)
        c_conv12_2 = c_conv12_2.permute(0, 2, 3, 1).contiguous()
        c_conv12_2 = c_conv12_2.view(batch_size, -1, self.n_classes)
        locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2,
            l_conv10_2, l_conv11_2, l_conv12_2], dim=1)
        classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2,
            c_conv9_2, c_conv10_2, c_conv11_2, c_conv12_2], dim=1)
        return locs, classes_scores


def get_inputs():
    return [torch.rand([4, 512, 64, 64]), torch.rand([4, 1024, 64, 64]),
        torch.rand([4, 512, 64, 64]), torch.rand([4, 256, 64, 64]),
        torch.rand([4, 256, 64, 64]), torch.rand([4, 256, 64, 64]),
        torch.rand([4, 256, 64, 64])]


def get_init_inputs():
    return [[], {'n_classes': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
    in_ptr12, in_ptr13, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4 % 147456
    x0 = xindex % 4
    x2 = xindex // 589824
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 16384, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4096 * ((x0 + 4 * x1) % 16) + 65536 * ((x0 +
        4 * x1 + 65536 * x2) // 65536 % 4) + (x0 + 4 * x1) // 16 % 4096),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x0 + 4 * x1) % 16, tmp4, eviction_policy=
        'evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 40960, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tmp10 & tmp12
    tmp14 = tl.load(in_ptr2 + (4096 * ((x0 + 4 * (-16384 + x1)) % 24) +
        98304 * ((x0 + 4 * (-16384 + x1) + 98304 * x2) // 98304 % 4) + (x0 +
        4 * (-16384 + x1)) // 24 % 4096), tmp13, eviction_policy=
        'evict_last', other=0.0)
    tmp15 = tl.load(in_ptr3 + (x0 + 4 * (-16384 + x1)) % 24, tmp13,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp13, tmp16, tmp17)
    tmp19 = tmp0 >= tmp11
    tmp20 = tl.full([1], 65536, tl.int64)
    tmp21 = tmp0 < tmp20
    tmp22 = tmp19 & tmp21
    tmp23 = tl.load(in_ptr4 + (4096 * ((x0 + 4 * (-40960 + x1)) % 24) +
        98304 * ((x0 + 4 * (-40960 + x1) + 98304 * x2) // 98304 % 4) + (x0 +
        4 * (-40960 + x1)) // 24 % 4096), tmp22, eviction_policy=
        'evict_last', other=0.0)
    tmp24 = tl.load(in_ptr5 + (x0 + 4 * (-40960 + x1)) % 24, tmp22,
        eviction_policy='evict_last', other=0.0)
    tmp25 = tmp23 + tmp24
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp22, tmp25, tmp26)
    tmp28 = tmp0 >= tmp20
    tmp29 = tl.full([1], 90112, tl.int64)
    tmp30 = tmp0 < tmp29
    tmp31 = tmp28 & tmp30
    tmp32 = tl.load(in_ptr6 + (4096 * ((x0 + 4 * (-65536 + x1)) % 24) +
        98304 * ((x0 + 4 * (-65536 + x1) + 98304 * x2) // 98304 % 4) + (x0 +
        4 * (-65536 + x1)) // 24 % 4096), tmp31, eviction_policy=
        'evict_last', other=0.0)
    tmp33 = tl.load(in_ptr7 + (x0 + 4 * (-65536 + x1)) % 24, tmp31,
        eviction_policy='evict_last', other=0.0)
    tmp34 = tmp32 + tmp33
    tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
    tmp36 = tl.where(tmp31, tmp34, tmp35)
    tmp37 = tmp0 >= tmp29
    tmp38 = tl.full([1], 114688, tl.int64)
    tmp39 = tmp0 < tmp38
    tmp40 = tmp37 & tmp39
    tmp41 = tl.load(in_ptr8 + (4096 * ((x0 + 4 * (-90112 + x1)) % 24) +
        98304 * ((x0 + 4 * (-90112 + x1) + 98304 * x2) // 98304 % 4) + (x0 +
        4 * (-90112 + x1)) // 24 % 4096), tmp40, eviction_policy=
        'evict_last', other=0.0)
    tmp42 = tl.load(in_ptr9 + (x0 + 4 * (-90112 + x1)) % 24, tmp40,
        eviction_policy='evict_last', other=0.0)
    tmp43 = tmp41 + tmp42
    tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
    tmp45 = tl.where(tmp40, tmp43, tmp44)
    tmp46 = tmp0 >= tmp38
    tmp47 = tl.full([1], 131072, tl.int64)
    tmp48 = tmp0 < tmp47
    tmp49 = tmp46 & tmp48
    tmp50 = tl.load(in_ptr10 + (4096 * ((x0 + 4 * (-114688 + x1)) % 16) +
        65536 * ((x0 + 4 * (-114688 + x1) + 65536 * x2) // 65536 % 4) + (
        x0 + 4 * (-114688 + x1)) // 16 % 4096), tmp49, eviction_policy=
        'evict_last', other=0.0)
    tmp51 = tl.load(in_ptr11 + (x0 + 4 * (-114688 + x1)) % 16, tmp49,
        eviction_policy='evict_last', other=0.0)
    tmp52 = tmp50 + tmp51
    tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
    tmp54 = tl.where(tmp49, tmp52, tmp53)
    tmp55 = tmp0 >= tmp47
    tl.full([1], 147456, tl.int64)
    tmp58 = tl.load(in_ptr12 + (4096 * ((x0 + 4 * (-131072 + x1)) % 16) +
        65536 * ((x0 + 4 * (-131072 + x1) + 65536 * x2) // 65536 % 4) + (
        x0 + 4 * (-131072 + x1)) // 16 % 4096), tmp55, eviction_policy=
        'evict_last', other=0.0)
    tmp59 = tl.load(in_ptr13 + (x0 + 4 * (-131072 + x1)) % 16, tmp55,
        eviction_policy='evict_last', other=0.0)
    tmp60 = tmp58 + tmp59
    tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
    tmp62 = tl.where(tmp55, tmp60, tmp61)
    tmp63 = tl.where(tmp49, tmp54, tmp62)
    tmp64 = tl.where(tmp40, tmp45, tmp63)
    tmp65 = tl.where(tmp31, tmp36, tmp64)
    tmp66 = tl.where(tmp22, tmp27, tmp65)
    tmp67 = tl.where(tmp13, tmp18, tmp66)
    tmp68 = tl.where(tmp4, tmp9, tmp67)
    tl.store(out_ptr0 + x3, tmp68, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27, primals_28, primals_29, primals_30, primals_31,
        primals_32, primals_33, primals_34, primals_35) = args
    args.clear()
    assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1))
    assert_size_stride(primals_2, (16, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (24, 1024, 3, 3), (9216, 9, 3, 1))
    assert_size_stride(primals_5, (24,), (1,))
    assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
    assert_size_stride(primals_7, (24, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_8, (24,), (1,))
    assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1))
    assert_size_stride(primals_10, (24, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_11, (24,), (1,))
    assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1))
    assert_size_stride(primals_13, (24, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_14, (24,), (1,))
    assert_size_stride(primals_15, (4, 256, 64, 64), (1048576, 4096, 64, 1))
    assert_size_stride(primals_16, (16, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (16,), (1,))
    assert_size_stride(primals_18, (4, 256, 64, 64), (1048576, 4096, 64, 1))
    assert_size_stride(primals_19, (16, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_20, (16,), (1,))
    assert_size_stride(primals_21, (4, 256, 64, 64), (1048576, 4096, 64, 1))
    assert_size_stride(primals_22, (16, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_23, (16,), (1,))
    assert_size_stride(primals_24, (24, 1024, 3, 3), (9216, 9, 3, 1))
    assert_size_stride(primals_25, (24,), (1,))
    assert_size_stride(primals_26, (24, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_27, (24,), (1,))
    assert_size_stride(primals_28, (24, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_29, (24,), (1,))
    assert_size_stride(primals_30, (24, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_31, (24,), (1,))
    assert_size_stride(primals_32, (16, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_33, (16,), (1,))
    assert_size_stride(primals_34, (16, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_35, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
        buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 24, 64, 64), (98304, 4096, 64, 1))
        buf2 = extern_kernels.convolution(primals_9, primals_7, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 24, 64, 64), (98304, 4096, 64, 1))
        buf3 = extern_kernels.convolution(primals_12, primals_10, stride=(
            1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 24, 64, 64), (98304, 4096, 64, 1))
        buf4 = extern_kernels.convolution(primals_15, primals_13, stride=(
            1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 24, 64, 64), (98304, 4096, 64, 1))
        buf5 = extern_kernels.convolution(primals_18, primals_16, stride=(
            1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 16, 64, 64), (65536, 4096, 64, 1))
        buf6 = extern_kernels.convolution(primals_21, primals_19, stride=(
            1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1))
        buf7 = extern_kernels.convolution(primals_1, primals_22, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 16, 64, 64), (65536, 4096, 64, 1))
        buf8 = extern_kernels.convolution(primals_6, primals_24, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 24, 64, 64), (98304, 4096, 64, 1))
        buf9 = extern_kernels.convolution(primals_9, primals_26, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 24, 64, 64), (98304, 4096, 64, 1))
        buf10 = extern_kernels.convolution(primals_12, primals_28, stride=(
            1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 24, 64, 64), (98304, 4096, 64, 1))
        buf11 = extern_kernels.convolution(primals_15, primals_30, stride=(
            1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 24, 64, 64), (98304, 4096, 64, 1))
        buf12 = extern_kernels.convolution(primals_18, primals_32, stride=(
            1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 16, 64, 64), (65536, 4096, 64, 1))
        buf13 = extern_kernels.convolution(primals_21, primals_34, stride=(
            1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 16, 64, 64), (65536, 4096, 64, 1))
        buf14 = empty_strided_cuda((4, 147456, 4), (589824, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(2359296)](buf0, primals_3, buf1,
            primals_5, buf2, primals_8, buf3, primals_11, buf4,
            primals_14, buf5, primals_17, buf6, primals_20, buf14,
            2359296, XBLOCK=512, num_warps=8, num_stages=1)
        del buf0
        del buf1
        del buf2
        del buf3
        del buf4
        del buf5
        del buf6
        del primals_11
        del primals_14
        del primals_17
        del primals_20
        del primals_3
        del primals_5
        del primals_8
        buf15 = empty_strided_cuda((4, 147456, 4), (589824, 4, 1),
            torch.float32)
        triton_poi_fused_cat_0[grid(2359296)](buf7, primals_23, buf8,
            primals_25, buf9, primals_27, buf10, primals_29, buf11,
            primals_31, buf12, primals_33, buf13, primals_35, buf15,
            2359296, XBLOCK=512, num_warps=8, num_stages=1)
        del buf10
        del buf11
        del buf12
        del buf13
        del buf7
        del buf8
        del buf9
        del primals_23
        del primals_25
        del primals_27
        del primals_29
        del primals_31
        del primals_33
        del primals_35
    return (buf14, buf15, primals_1, primals_2, primals_4, primals_6,
        primals_7, primals_9, primals_10, primals_12, primals_13,
        primals_15, primals_16, primals_18, primals_19, primals_21,
        primals_22, primals_24, primals_26, primals_28, primals_30,
        primals_32, primals_34)


class PredictionConvolutionsNew(nn.Module):
    """
    Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.

    The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 24564 prior (default) boxes.
    See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.

    The class scores represent the scores of each object class in each of the 24564 bounding boxes located.
    A high score for 'background' = no object.
    """

    def __init__(self, n_classes):
        """
        :param n_classes: number of different types of objects
        """
        super(PredictionConvolutionsNew, self).__init__()
        self.n_classes = n_classes
        n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6,
            'conv10_2': 6, 'conv11_2': 4, 'conv12_2': 4}
        self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4,
            kernel_size=3, padding=1)
        self.loc_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * 4,
            kernel_size=3, padding=1)
        self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes,
            kernel_size=3, padding=1)
        self.cl_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * n_classes,
            kernel_size=3, padding=1)
        self.init_conv2d()

    def init_conv2d(self):
        """
        Initialize convolution parameters.
        """
        for c in self.children():
            if isinstance(c, nn.Conv2d):
                nn.init.xavier_uniform_(c.weight)
                nn.init.constant_(c.bias, 0.0)

    def forward(self, input_0, input_1, input_2, input_3, input_4,
        input_5, input_6):
        primals_2 = self.loc_conv4_3.weight
        primals_3 = self.loc_conv4_3.bias
        primals_4 = self.loc_conv7.weight
        primals_5 = self.loc_conv7.bias
        primals_7 = self.loc_conv8_2.weight
        primals_8 = self.loc_conv8_2.bias
        primals_10 = self.loc_conv9_2.weight
        primals_11 = self.loc_conv9_2.bias
        primals_13 = self.loc_conv10_2.weight
        primals_14 = self.loc_conv10_2.bias
        primals_16 = self.loc_conv11_2.weight
        primals_17 = self.loc_conv11_2.bias
        primals_19 = self.loc_conv12_2.weight
        primals_20 = self.loc_conv12_2.bias
        primals_22 = self.cl_conv4_3.weight
        primals_23 = self.cl_conv4_3.bias
        primals_24 = self.cl_conv7.weight
        primals_25 = self.cl_conv7.bias
        primals_26 = self.cl_conv8_2.weight
        primals_27 = self.cl_conv8_2.bias
        primals_28 = self.cl_conv9_2.weight
        primals_29 = self.cl_conv9_2.bias
        primals_30 = self.cl_conv10_2.weight
        primals_31 = self.cl_conv10_2.bias
        primals_32 = self.cl_conv11_2.weight
        primals_33 = self.cl_conv11_2.bias
        primals_34 = self.cl_conv12_2.weight
        primals_35 = self.cl_conv12_2.bias
        primals_1 = input_0
        primals_6 = input_1
        primals_9 = input_2
        primals_12 = input_3
        primals_15 = input_4
        primals_18 = input_5
        primals_21 = input_6
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35])
        return output[0], output[1]
doduythao/ssd
PredictionConvolutions
false
12,633
[ "MIT" ]
0
170064a3edef05d3274b08ea7f622eb3238b5c5c
https://github.com/doduythao/ssd/tree/170064a3edef05d3274b08ea7f622eb3238b5c5c
GCN
from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.functional as F


class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features
            ) + ' -> ' + str(self.out_features) + ')'


class GCN(nn.Module):

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, 1024)
        self.gc2 = GraphConvolution(1024, 1024)
        self.gc3 = GraphConvolution(1024, 1024)
        self.gc4 = GraphConvolution(1024, nhid)
        self.linear1 = nn.Linear(nhid, 2048)
        self.linear2 = nn.Linear(2048, 2048)
        self.linear3 = nn.Linear(2048, 2048)
        self.linear4 = nn.Linear(2048, nclass)
        self.dropout = dropout

    def forward(self, x, adj):
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc2(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc3(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc4(x, adj)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        return self.linear4(x)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 2048
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18) = args
    args.clear()
    assert_size_stride(primals_1, (4, 1024), (1024, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (1024,), (1,))
    assert_size_stride(primals_5, (1024, 1024), (1024, 1))
    assert_size_stride(primals_6, (1024,), (1,))
    assert_size_stride(primals_7, (1024, 1024), (1024, 1))
    assert_size_stride(primals_8, (1024,), (1,))
    assert_size_stride(primals_9, (1024, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (2048, 4), (4, 1))
    assert_size_stride(primals_12, (2048,), (1,))
    assert_size_stride(primals_13, (2048, 2048), (2048, 1))
    assert_size_stride(primals_14, (2048,), (1,))
    assert_size_stride(primals_15, (2048, 2048), (2048, 1))
    assert_size_stride(primals_16, (2048,), (1,))
    assert_size_stride(primals_17, (4, 2048), (2048, 1))
    assert_size_stride(primals_18, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(primals_3, buf0, out=buf1)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_relu_0[grid(4096)](buf2, primals_4, 4096,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_4
        buf3 = buf0
        del buf0
        extern_kernels.mm(buf2, primals_5, out=buf3)
        buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(primals_3, buf3, out=buf4)
        buf5 = buf4
        del buf4
        triton_poi_fused_add_relu_0[grid(4096)](buf5, primals_6, 4096,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_6
        buf6 = buf3
        del buf3
        extern_kernels.mm(buf5, primals_7, out=buf6)
        buf7 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(primals_3, buf6, out=buf7)
        del buf6
        buf8 = buf7
        del buf7
        triton_poi_fused_add_relu_0[grid(4096)](buf8, primals_8, 4096,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_8
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf8, primals_9, out=buf9)
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_10, primals_3, buf9, alpha=1, beta=1,
            out=buf10)
        del primals_10
        buf11 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
        extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (4, 2048),
            (1, 4), 0), out=buf11)
        buf12 = buf11
        del buf11
        triton_poi_fused_relu_1[grid(8192)](buf12, primals_12, 8192,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_12
        buf13 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
        extern_kernels.mm(buf12, reinterpret_tensor(primals_13, (2048,
            2048), (1, 2048), 0), out=buf13)
        buf14 = buf13
        del buf13
        triton_poi_fused_relu_1[grid(8192)](buf14, primals_14, 8192,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_14
        buf15 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
        extern_kernels.mm(buf14, reinterpret_tensor(primals_15, (2048,
            2048), (1, 2048), 0), out=buf15)
        buf16 = buf15
        del buf15
        triton_poi_fused_relu_1[grid(8192)](buf16, primals_16, 8192,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_16
        buf17 = buf9
        del buf9
        extern_kernels.addmm(primals_18, buf16, reinterpret_tensor(
            primals_17, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf17)
        del primals_18
    return (buf17, buf2, buf5, buf8, buf10, buf12, buf14, buf16,
        primals_17, primals_15, primals_13, primals_11,
        reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_9, (4, 1024), (1, 4), 0),
        reinterpret_tensor(primals_7, (1024, 1024), (1, 1024), 0),
        reinterpret_tensor(primals_5, (1024, 1024), (1, 1024), 0),
        reinterpret_tensor(primals_2, (4, 4), (1, 4), 0))


class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features
            ) + ' -> ' + str(self.out_features) + ')'


class GCNNew(nn.Module):

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCNNew, self).__init__()
        self.gc1 = GraphConvolution(nfeat, 1024)
        self.gc2 = GraphConvolution(1024, 1024)
        self.gc3 = GraphConvolution(1024, 1024)
        self.gc4 = GraphConvolution(1024, nhid)
        self.linear1 = nn.Linear(nhid, 2048)
        self.linear2 = nn.Linear(2048, 2048)
        self.linear3 = nn.Linear(2048, 2048)
        self.linear4 = nn.Linear(2048, nclass)
        self.dropout = dropout

    def forward(self, input_0, input_1):
        primals_1 = self.gc1.weight
        primals_4 = self.gc1.bias
        primals_5 = self.gc2.weight
        primals_6 = self.gc2.bias
        primals_7 = self.gc3.weight
        primals_8 = self.gc3.bias
        primals_9 = self.gc4.weight
        primals_10 = self.gc4.bias
        primals_11 = self.linear1.weight
        primals_12 = self.linear1.bias
        primals_13 = self.linear2.weight
        primals_14 = self.linear2.bias
        primals_15 = self.linear3.weight
        primals_16 = self.linear3.bias
        primals_17 = self.linear4.weight
        primals_18 = self.linear4.bias
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18])
        return output[0]
jarvis08/gpackage-gcn-torch
GCN
false
12,634
[ "MIT" ]
0
5e483ea3012dfd0f23b194519c1295e3efcbdc35
https://github.com/jarvis08/gpackage-gcn-torch/tree/5e483ea3012dfd0f23b194519c1295e3efcbdc35
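Aside (illustrative, not part of the dataset record): a minimal smoke test for the pair above, assuming the eager GCN class defined earlier in this record takes (nfeat, nhid, nclass, dropout) like the compiled GCNNew, and assuming a CUDA device. Shapes follow the record's assert_size_stride guards, where features and adjacency are both 4x4, so nfeat = nhid = nclass = 4. eval() disables dropout so the eager and compiled paths compute the same function.

import torch

model = GCN(nfeat=4, nhid=4, nclass=4, dropout=0.5).cuda().eval()
x = torch.rand(4, 4, device='cuda')    # node features [N, nfeat]
adj = torch.rand(4, 4, device='cuda')  # dense adjacency [N, N]
with torch.no_grad():
    out = model(x, adj)                # eager path
print(out.shape)                       # expected: torch.Size([4, 4])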
TransformerDecoderLayer
import math
import torch
from torch import nn
import torch.nn.functional as F


def _normalize(tensor, norm_layer):
    """
    Broadcast layer norm
    """
    size = tensor.size()
    return norm_layer(tensor.view(-1, size[-1])).view(size)


class MultiHeadAttention(nn.Module):

    def __init__(self, n_heads, dim, dropout=0):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.dim = dim
        self.dropout = nn.Dropout(p=dropout)
        self.q_lin = nn.Linear(dim, dim)
        self.k_lin = nn.Linear(dim, dim)
        self.v_lin = nn.Linear(dim, dim)
        nn.init.xavier_normal_(self.q_lin.weight)
        nn.init.xavier_normal_(self.k_lin.weight)
        nn.init.xavier_normal_(self.v_lin.weight)
        self.out_lin = nn.Linear(dim, dim)
        nn.init.xavier_normal_(self.out_lin.weight)

    def forward(self, query, key=None, value=None, mask=None):
        batch_size, query_len, dim = query.size()
        assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured'
        n_heads = self.n_heads
        dim_per_head = dim // n_heads
        scale = math.sqrt(dim_per_head)

        def prepare_head(tensor):
            _bsz, seq_len, _ = tensor.size()
            tensor = tensor.view(batch_size, tensor.size(1), n_heads,
                dim_per_head)
            tensor = tensor.transpose(1, 2).contiguous().view(batch_size *
                n_heads, seq_len, dim_per_head)
            return tensor

        if key is None and value is None:
            key = value = query
        elif value is None:
            value = key
        _, key_len, dim = key.size()
        q = prepare_head(self.q_lin(query))
        k = prepare_head(self.k_lin(key))
        v = prepare_head(self.v_lin(value))
        dot_prod = q.bmm(k.transpose(1, 2))
        attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1,
            n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len
            ).view(batch_size * n_heads, query_len, key_len)
        assert attn_mask.shape == dot_prod.shape
        dot_prod.masked_fill_(attn_mask, -float(1e+20))
        attn_weights = F.softmax(dot_prod / scale, dim=-1)
        attentioned = attn_weights.bmm(v)
        attentioned = attentioned.view(batch_size, n_heads, query_len,
            dim_per_head).transpose(1, 2).contiguous().view(batch_size,
            query_len, dim)
        out = self.out_lin(attentioned)
        return out


class TransformerFFN(nn.Module):

    def __init__(self, dim, dim_hidden, dropout=0):
        super(TransformerFFN, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.lin1 = nn.Linear(dim, dim_hidden)
        self.lin2 = nn.Linear(dim_hidden, dim)
        nn.init.xavier_uniform_(self.lin1.weight)
        nn.init.xavier_uniform_(self.lin2.weight)

    def forward(self, x):
        x = F.relu(self.lin1(x))
        x = self.dropout(x)
        x = self.lin2(x)
        x = self.dropout(x)
        return x


class TransformerDecoderLayer(nn.Module):

    def __init__(self, n_heads, embedding_size, ffn_size,
            attention_dropout=0.0, relu_dropout=0.0):
        super().__init__()
        self.dim = embedding_size
        self.ffn_dim = ffn_size
        self.self_attention = MultiHeadAttention(n_heads, embedding_size,
            dropout=attention_dropout)
        self.norm1 = nn.LayerNorm(embedding_size)
        self.encoder_attention = MultiHeadAttention(n_heads, embedding_size,
            dropout=attention_dropout)
        self.norm2 = nn.LayerNorm(embedding_size)
        self.ffn = TransformerFFN(embedding_size, ffn_size, dropout=
            relu_dropout)
        self.norm3 = nn.LayerNorm(embedding_size)

    def forward(self, x, encoder_output, encoder_mask):
        decoder_mask = self._create_selfattn_mask(x)
        residual = x
        x = self.self_attention(query=x, mask=decoder_mask)
        x = x + residual
        x = _normalize(x, self.norm1)
        residual = x
        x = self.encoder_attention(query=x, key=encoder_output, value=
            encoder_output, mask=encoder_mask)
        x = residual + x
        x = _normalize(x, self.norm2)
        residual = x
        x = self.ffn(x)
        x = residual + x
        x = _normalize(x, self.norm3)
        return x

    def _create_selfattn_mask(self, x):
        bsz = x.size(0)
        time = x.size(1)
        mask = torch.tril(x.new(time, time).fill_(1))
        mask = mask.unsqueeze(0).expand(bsz, -1, -1)
        return mask


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'n_heads': 4, 'embedding_size': 4, 'ffn_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex tmp0 = x0 + -1 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 == tmp4 tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, 
XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_repeat_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp8 = tl.where(tmp6, tmp2, tmp7) tmp9 = tmp8 * tmp4 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tl.where(tmp11, tmp2, tmp12) tmp14 = tmp13 * tmp4 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tl.where(tmp16, tmp2, tmp17) tmp19 = tmp18 * tmp4 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tl_math.exp(tmp22) tmp24 = tmp9 - tmp20 tmp25 = tmp24 * tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp14 - tmp20 tmp29 = tmp28 * tmp4 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tmp32 = tmp19 - tmp20 tmp33 = tmp32 * tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp31 + tmp34 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000200408773e+20 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp4 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_13(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4, 4), (4, 1)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4, 4), (4, 1)) assert_size_stride(primals_25, (4,), (1,)) assert_size_stride(primals_26, (4, 4), (4, 1)) 
assert_size_stride(primals_27, (4,), (1,)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf3, primals_7, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_5, buf5, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_repeat_1[grid(256)](buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0) del buf2 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(64)](buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_masked_fill_3[grid(256)](buf10, buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0) del buf9 extern_kernels.bmm(buf10, reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_9 buf14 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf15 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused_native_layer_norm_5[grid(16)](buf13, primals_1, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](buf13, primals_1, buf14, buf15, primals_10, primals_11, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf17, primals_14, buf18, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_14 buf19 = buf17 del buf17 
extern_kernels.mm(reinterpret_tensor(primals_12, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) del primals_15 buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_12, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf20) del primals_17 buf21 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf20, primals_18, buf21, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_18 buf22 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf20 triton_poi_fused_clone_0[grid(16, 4)](buf19, primals_16, buf22, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_16 buf23 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 0, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.bool) triton_poi_fused_repeat_7[grid(64)](primals_19, buf24, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_19 buf25 = reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 64), 0) del buf19 buf26 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_8[grid(64)](buf24, buf23, buf25, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = buf23 del buf23 triton_poi_fused__softmax_masked_fill_9[grid(256)](buf27, buf24, buf25, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1) buf28 = reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 1), 0) del buf26 extern_kernels.bmm(buf27, reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 0), 0), out=buf28) buf29 = reinterpret_tensor(buf25, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf25 triton_poi_fused_clone_4[grid(16, 4)](buf28, buf29, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf30 = reinterpret_tensor(buf28, (16, 4), (4, 1), 0) del buf28 extern_kernels.mm(reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0) del buf30 triton_poi_fused_add_10[grid(64)](buf31, buf16, primals_21, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_21 buf32 = buf15 del buf15 buf33 = buf14 del buf14 triton_poi_fused_native_layer_norm_11[grid(16)](buf31, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf31, buf32, buf33, primals_22, primals_23, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_23 buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf34, reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf35) buf36 = reinterpret_tensor(buf35, (4, 4, 4), (16, 4, 1), 0) del buf35 buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_13[grid(64)](buf36, primals_25, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_25 buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(primals_26, (4, 4), (1, 4), 0), out=buf37) buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0) del buf37 triton_poi_fused_add_10[grid(64)](buf38, buf34, primals_27, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_27 buf39 = buf33 del buf33 buf40 = buf32 del buf32 triton_poi_fused_native_layer_norm_11[grid(16)](buf38, buf39, 
buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_12[grid(64)](buf38, buf39, buf40, primals_28, primals_29, buf41, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf39 del buf40 del primals_29 return (reinterpret_tensor(buf41, (4, 4, 4), (16, 4, 1), 0), primals_1, primals_10, primals_22, primals_28, buf7, buf10, reinterpret_tensor (buf12, (16, 4), (4, 1), 0), buf13, buf16, reinterpret_tensor( primals_12, (16, 4), (4, 1), 0), buf24, buf27, reinterpret_tensor( buf29, (16, 4), (4, 1), 0), reinterpret_tensor(buf31, (16, 4), (4, 1), 0), buf34, reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(buf38, (16, 4), (4, 1), 0), primals_26, buf42, primals_24, primals_20, reinterpret_tensor(buf21, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf18, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0), primals_13, primals_8, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)) def _normalize(tensor, norm_layer): """ Broadcast layer norm """ size = tensor.size() return norm_layer(tensor.view(-1, size[-1])).view(size) class MultiHeadAttention(nn.Module): def __init__(self, n_heads, dim, dropout=0): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads self.dim = dim self.dropout = nn.Dropout(p=dropout) self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.q_lin.weight) nn.init.xavier_normal_(self.k_lin.weight) nn.init.xavier_normal_(self.v_lin.weight) self.out_lin = nn.Linear(dim, dim) nn.init.xavier_normal_(self.out_lin.weight) def forward(self, query, key=None, value=None, mask=None): batch_size, query_len, dim = query.size() assert dim == self.dim, f'Dimensions do not match: {dim} query vs {self.dim} configured' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) def prepare_head(tensor): _bsz, seq_len, _ = tensor.size() tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) tensor = tensor.transpose(1, 2).contiguous().view(batch_size * n_heads, seq_len, dim_per_head) return tensor if key is None and value is None: key = value = query elif value is None: value = key _, key_len, dim = key.size() q = prepare_head(self.q_lin(query)) k = prepare_head(self.k_lin(key)) v = prepare_head(self.v_lin(value)) dot_prod = q.bmm(k.transpose(1, 2)) attn_mask = (mask == 0).view(batch_size, 1, -1, key_len).repeat(1, n_heads, 1, 1).expand(batch_size, n_heads, query_len, key_len ).view(batch_size * n_heads, query_len, key_len) assert attn_mask.shape == dot_prod.shape dot_prod.masked_fill_(attn_mask, -float(1e+20)) attn_weights = F.softmax(dot_prod / scale, dim=-1) attentioned = attn_weights.bmm(v) attentioned = attentioned.view(batch_size, n_heads, query_len, dim_per_head).transpose(1, 2).contiguous().view(batch_size, query_len, dim) out = self.out_lin(attentioned) return out class TransformerFFN(nn.Module): def __init__(self, dim, dim_hidden, dropout=0): super(TransformerFFN, self).__init__() self.dropout = nn.Dropout(p=dropout) self.lin1 = nn.Linear(dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, dim) nn.init.xavier_uniform_(self.lin1.weight) nn.init.xavier_uniform_(self.lin2.weight) def forward(self, x): x = F.relu(self.lin1(x)) x = self.dropout(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerDecoderLayerNew(nn.Module): def 
__init__(self, n_heads, embedding_size, ffn_size, attention_dropout =0.0, relu_dropout=0.0): super().__init__() self.dim = embedding_size self.ffn_dim = ffn_size self.self_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm1 = nn.LayerNorm(embedding_size) self.encoder_attention = MultiHeadAttention(n_heads, embedding_size, dropout=attention_dropout) self.norm2 = nn.LayerNorm(embedding_size) self.ffn = TransformerFFN(embedding_size, ffn_size, dropout= relu_dropout) self.norm3 = nn.LayerNorm(embedding_size) def _create_selfattn_mask(self, x): bsz = x.size(0) time = x.size(1) mask = torch.tril(x.new(time, time).fill_(1)) mask = mask.unsqueeze(0).expand(bsz, -1, -1) return mask def forward(self, input_0, input_1, input_2): primals_2 = self.self_attention.q_lin.weight primals_3 = self.self_attention.q_lin.bias primals_4 = self.self_attention.k_lin.weight primals_5 = self.self_attention.k_lin.bias primals_6 = self.self_attention.v_lin.weight primals_7 = self.self_attention.v_lin.bias primals_8 = self.self_attention.out_lin.weight primals_9 = self.self_attention.out_lin.bias primals_10 = self.norm1.weight primals_11 = self.norm1.bias primals_13 = self.encoder_attention.q_lin.weight primals_14 = self.encoder_attention.q_lin.bias primals_15 = self.encoder_attention.k_lin.weight primals_16 = self.encoder_attention.k_lin.bias primals_17 = self.encoder_attention.v_lin.weight primals_18 = self.encoder_attention.v_lin.bias primals_19 = self.encoder_attention.out_lin.weight primals_21 = self.encoder_attention.out_lin.bias primals_22 = self.norm2.weight primals_23 = self.norm2.bias primals_20 = self.ffn.lin1.weight primals_25 = self.ffn.lin1.bias primals_24 = self.ffn.lin2.weight primals_27 = self.ffn.lin2.bias primals_28 = self.norm3.weight primals_29 = self.norm3.bias primals_1 = input_0 primals_12 = input_1 primals_26 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return output[0]
jinjiren/ParlAI
TransformerDecoderLayer
false
12,635
[ "MIT" ]
0
40799aeee69f2a0bb25a1341bb8da0c44861268e
https://github.com/jinjiren/ParlAI/tree/40799aeee69f2a0bb25a1341bb8da0c44861268e
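Aside (illustrative, not part of the dataset record): the eager layer above can be exercised directly with the shapes from this record's get_inputs()/get_init_inputs(). An all-ones encoder mask keeps every source position visible, since the module fills positions where mask == 0 with -1e20 before the softmax; the causal self-attention mask is built internally by _create_selfattn_mask.

import torch

layer = TransformerDecoderLayer(n_heads=4, embedding_size=4, ffn_size=4)
x = torch.rand(4, 4, 4)          # decoder states [bsz, tgt_len, dim]
enc_out = torch.rand(4, 4, 4)    # encoder output [bsz, src_len, dim]
enc_mask = torch.ones(4, 4)      # 1 = attend, 0 = padding
out = layer(x, enc_out, enc_mask)
print(out.shape)                 # torch.Size([4, 4, 4])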
AugCNN
import torch
import torch.nn as nn
import torch.nn.functional as F


def apply_init_(modules):
    """
    Initialize NN modules
    """
    for m in modules:
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(m.weight, 1)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)


class Conv2d_tf(nn.Conv2d):
    """
    Conv2d with the padding behavior from TF
    """

    def __init__(self, *args, **kwargs):
        super(Conv2d_tf, self).__init__(*args, **kwargs)
        self.padding = kwargs.get('padding', 'SAME')

    def _compute_padding(self, input, dim):
        input_size = input.size(dim + 2)
        filter_size = self.weight.size(dim + 2)
        effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
        out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
        total_padding = max(0, (out_size - 1) * self.stride[dim] +
            effective_filter_size - input_size)
        additional_padding = int(total_padding % 2 != 0)
        return additional_padding, total_padding

    def forward(self, input):
        if self.padding == 'VALID':
            return F.conv2d(input, self.weight, self.bias, self.stride,
                padding=0, dilation=self.dilation, groups=self.groups)
        rows_odd, padding_rows = self._compute_padding(input, dim=0)
        cols_odd, padding_cols = self._compute_padding(input, dim=1)
        if rows_odd or cols_odd:
            input = F.pad(input, [0, cols_odd, 0, rows_odd])
        return F.conv2d(input, self.weight, self.bias, self.stride,
            padding=(padding_rows // 2, padding_cols // 2), dilation=self.
            dilation, groups=self.groups)


class AugCNN(nn.Module):
    """
    Convolutional Neural Network used as Augmentation
    """

    def __init__(self):
        super(AugCNN, self).__init__()
        self.aug = Conv2d_tf(3, 3, kernel_size=3)
        apply_init_(self.modules())
        self.train()

    def forward(self, obs):
        return self.aug(obs)


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 3
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_3, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152,
            XBLOCK=512, num_warps=4, num_stages=1)
        del primals_3
    return buf1, primals_1, primals_2


def apply_init_(modules):
    """
    Initialize NN modules
    """
    for m in modules:
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(m.weight, 1)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)


class Conv2d_tf(nn.Conv2d):
    """
    Conv2d with the padding behavior from TF
    """

    def __init__(self, *args, **kwargs):
        super(Conv2d_tf, self).__init__(*args, **kwargs)
        self.padding = kwargs.get('padding', 'SAME')

    def _compute_padding(self, input, dim):
        input_size = input.size(dim + 2)
        filter_size = self.weight.size(dim + 2)
        effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
        out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
        total_padding = max(0, (out_size - 1) * self.stride[dim] +
            effective_filter_size - input_size)
        additional_padding = int(total_padding % 2 != 0)
        return additional_padding, total_padding

    def forward(self, input):
        if self.padding == 'VALID':
            return F.conv2d(input, self.weight, self.bias, self.stride,
                padding=0, dilation=self.dilation, groups=self.groups)
        rows_odd, padding_rows = self._compute_padding(input, dim=0)
        cols_odd, padding_cols = self._compute_padding(input, dim=1)
        if rows_odd or cols_odd:
            input = F.pad(input, [0, cols_odd, 0, rows_odd])
        return F.conv2d(input, self.weight, self.bias, self.stride,
            padding=(padding_rows // 2, padding_cols // 2), dilation=self.
            dilation, groups=self.groups)


class AugCNNNew(nn.Module):
    """
    Convolutional Neural Network used as Augmentation
    """

    def __init__(self):
        super(AugCNNNew, self).__init__()
        self.aug = Conv2d_tf(3, 3, kernel_size=3)
        apply_init_(self.modules())
        self.train()

    def forward(self, input_0):
        primals_2 = self.aug.weight
        primals_3 = self.aug.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
joshnroy/contrastive-rl
AugCNN
false
12,636
[ "MIT" ]
0
d0e8cd8fd6963983dc62dd282b788002a892704e
https://github.com/joshnroy/contrastive-rl/tree/d0e8cd8fd6963983dc62dd282b788002a892704e
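Aside (illustrative, not part of the dataset record): with the TF-style 'SAME' padding implemented by Conv2d_tf, the 3x3 augmentation conv preserves spatial resolution, which is what lets the compiled version above hard-code padding=(1, 1) for 64x64 inputs.

import torch

aug = AugCNN()
obs = torch.rand(4, 3, 64, 64)
out = aug(obs)
assert out.shape == obs.shape    # SAME padding: 64 -> 64 with a 3x3 kernel, stride 1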
GlobalAttention
import torch
import torch.nn as nn
import torch.nn.functional as F


class GlobalAttention(nn.Module):
    """
    Global Attention between encoder and decoder
    """

    def __init__(self, key_features, query_features, value_features,
            hidden_features=None, dropout=0.0):
        """
        Args:
            key_features: int
                dimension of keys
            query_features: int
                dimension of queries
            value_features: int
                dimension of values (outputs)
            hidden_features: int
                dimension of hidden states (default value_features)
            dropout: float
                dropout rate
        """
        super(GlobalAttention, self).__init__()
        if hidden_features is None:
            hidden_features = value_features
        self.key_proj = nn.Linear(key_features, 2 * hidden_features, bias=True)
        self.query_proj = nn.Linear(query_features, hidden_features, bias=True)
        self.dropout = dropout
        self.fc = nn.Linear(hidden_features, value_features)
        self.hidden_features = hidden_features
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.key_proj.weight)
        nn.init.constant_(self.key_proj.bias, 0)
        nn.init.xavier_uniform_(self.query_proj.weight)
        nn.init.constant_(self.query_proj.bias, 0)
        nn.init.xavier_uniform_(self.fc.weight)
        nn.init.constant_(self.fc.bias, 0)

    def forward(self, query, key, key_mask=None):
        """
        Args:
            query: Tensor
                query tensor [batch, query_length, query_features]
            key: Tensor
                key tensor [batch, key_length, key_features]
            key_mask: ByteTensor or None
                binary ByteTensor [batch, src_len] padding elements are
                indicated by 1s.
        Returns: Tensor
            value tensor [batch, query_length, value_features]
        """
        bs, timesteps, _ = key.size()
        dim = self.hidden_features
        query = self.query_proj(query)
        c = self.key_proj(key)
        c = c.view(bs, timesteps, 2, dim)
        key = c[:, :, 0]
        value = c[:, :, 1]
        attn_weights = torch.bmm(query, key.transpose(1, 2))
        if key_mask is not None:
            attn_weights = attn_weights.masked_fill(key_mask.unsqueeze(1),
                float('-inf'))
        attn_weights = F.softmax(attn_weights.float(), dim=-1, dtype=torch.
            float32 if attn_weights.dtype == torch.float16 else
            attn_weights.dtype)
        out = torch.bmm(attn_weights, value)
        out = F.dropout(self.fc(out), p=self.dropout, training=self.training)
        return out

    def init(self, query, key, key_mask=None, init_scale=1.0):
        with torch.no_grad():
            return self(query, key, key_mask=key_mask)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'key_features': 4, 'query_features': 4, 'value_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_5, (8, 4), (4, 1))
    assert_size_stride(primals_6, (8,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 8), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_5
        del primals_6
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf1, (4, 4, 4), (32, 1, 8), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf4 = buf2
        del buf2
        triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf5 = buf3
        del buf3
        extern_kernels.bmm(buf4, reinterpret_tensor(buf1, (4, 4, 4), (32,
            8, 1), 4), out=buf5)
        buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del primals_8
    return (reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf4,
        reinterpret_tensor(buf5, (16, 4), (4, 1), 0), primals_7,
        reinterpret_tensor(buf1, (4, 4, 4), (32, 1, 8), 4),
        reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf1, (4, 4, 4), (32, 8, 1), 0))


class GlobalAttentionNew(nn.Module):
    """
    Global Attention between encoder and decoder
    """

    def __init__(self, key_features, query_features, value_features,
            hidden_features=None, dropout=0.0):
        """
        Args:
            key_features: int
                dimension of keys
            query_features: int
                dimension of queries
            value_features: int
                dimension of values (outputs)
            hidden_features: int
                dimension of hidden states (default value_features)
            dropout: float
                dropout rate
        """
        super(GlobalAttentionNew, self).__init__()
        if hidden_features is None:
            hidden_features = value_features
        self.key_proj = nn.Linear(key_features, 2 * hidden_features, bias=True)
        self.query_proj = nn.Linear(query_features, hidden_features, bias=True)
        self.dropout = dropout
        self.fc = nn.Linear(hidden_features, value_features)
        self.hidden_features = hidden_features
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.key_proj.weight)
        nn.init.constant_(self.key_proj.bias, 0)
        nn.init.xavier_uniform_(self.query_proj.weight)
        nn.init.constant_(self.query_proj.bias, 0)
        nn.init.xavier_uniform_(self.fc.weight)
        nn.init.constant_(self.fc.bias, 0)

    def init(self, query, key, key_mask=None, init_scale=1.0):
        with torch.no_grad():
            return self(query, key, key_mask=key_mask)

    def forward(self, input_0, input_1):
        primals_5 = self.key_proj.weight
        primals_6 = self.key_proj.bias
        primals_2 = self.query_proj.weight
        primals_3 = self.query_proj.bias
        primals_7 = self.fc.weight
        primals_8 = self.fc.bias
        primals_1 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
juheeuu/flowseq
GlobalAttention
false
12,637
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
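Aside (illustrative, not part of the dataset record): the key projection produces 2 * hidden_features channels that are split into keys and values, which is why the compiled call above reads buf1 at strided offsets 0 and 4. A minimal eager invocation with the record's sample shapes:

import torch

attn = GlobalAttention(key_features=4, query_features=4, value_features=4)
query = torch.rand(4, 4, 4)      # [batch, query_length, query_features]
key = torch.rand(4, 4, 4)        # [batch, key_length, key_features]
out = attn(query, key)           # no key_mask: every key position is visible
print(out.shape)                 # torch.Size([4, 4, 4])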
MultiheadAttention
import math
import torch
import torch.nn as nn
import torch as th


class QKVMultiheadAttention(nn.Module):

    def __init__(self, n_heads: 'int', n_ctx: 'int'):
        super().__init__()
        self.n_heads = n_heads
        self.n_ctx = n_ctx

    def forward(self, qkv):
        bs, n_ctx, width = qkv.shape
        attn_ch = width // self.n_heads // 3
        scale = 1 / math.sqrt(math.sqrt(attn_ch))
        qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
        q, k, v = th.split(qkv, attn_ch, dim=-1)
        weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
        wdtype = weight.dtype
        weight = th.softmax(weight.float(), dim=-1).type(wdtype)
        return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)


class MultiheadAttention(nn.Module):

    def __init__(self, n_ctx, width, heads):
        super().__init__()
        self.n_ctx = n_ctx
        self.width = width
        self.heads = heads
        self.c_qkv = nn.Linear(width, width * 3)
        self.c_proj = nn.Linear(width, width)
        self.attention = QKVMultiheadAttention(heads, n_ctx)

    def forward(self, x):
        x = self.c_qkv(x)
        x = self.attention(x)
        x = self.c_proj(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_ctx': 4, 'width': 4, 'heads': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch as th

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused__softmax_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
        out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    x3 = xindex // 16
    tmp0 = tl.load(in_ptr0 + 3 * x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + (x0 + 16 * x3), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x3), xmask, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr2 + (8 + x0 + 16 * x3), xmask, eviction_policy=
        'evict_last')
    tmp13 = tl.load(in_ptr2 + (12 + x0 + 16 * x3), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp4 * tmp7
    tmp9 = triton_helpers.maximum(tmp6, tmp8)
    tmp11 = tmp4 * tmp10
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp14 = tmp4 * tmp13
    tmp15 = triton_helpers.maximum(tmp12, tmp14)
    tmp16 = tmp6 - tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp8 - tmp15
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp11 - tmp15
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp14 - tmp15
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tl.store(out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr1 + x4, tmp15, xmask)
    tl.store(out_ptr2 + x4, tmp26, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex // 4
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x3 = xindex // 64
    x2 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp7 = tmp5 / tmp6
    tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp7, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (2 + 3 * x1 + 12 * x0 + 48 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
        tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (12, 4), (4, 1))
    assert_size_stride(primals_2, (12,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
        del primals_1
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(64)](buf0, primals_2, buf2, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
        triton_poi_fused__softmax_mul_1[grid(64)](buf0, primals_2, buf2,
            buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(256)](buf1, buf2, buf3, buf4,
            buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
        del buf4
        triton_poi_fused_clone_3[grid(64)](buf0, primals_2, buf6, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del buf0
        del primals_2
        buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0)
        del buf3
        extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_4[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
        del buf7
        extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
        buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0)
        del buf9
        triton_poi_fused_add_5[grid(64)](buf10, primals_5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_5
    return (buf10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0),
        reinterpret_tensor(buf2, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0),
        buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_4,
        reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0))


class QKVMultiheadAttention(nn.Module):

    def __init__(self, n_heads: 'int', n_ctx: 'int'):
        super().__init__()
        self.n_heads = n_heads
        self.n_ctx = n_ctx

    def forward(self, qkv):
        bs, n_ctx, width = qkv.shape
        attn_ch = width // self.n_heads // 3
        scale = 1 / math.sqrt(math.sqrt(attn_ch))
        qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
        q, k, v = th.split(qkv, attn_ch, dim=-1)
        weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
        wdtype = weight.dtype
        weight = th.softmax(weight.float(), dim=-1).type(wdtype)
        return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)


class MultiheadAttentionNew(nn.Module):

    def __init__(self, n_ctx, width, heads):
        super().__init__()
        self.n_ctx = n_ctx
        self.width = width
        self.heads = heads
        self.c_qkv = nn.Linear(width, width * 3)
        self.c_proj = nn.Linear(width, width)
        self.attention = QKVMultiheadAttention(heads, n_ctx)

    def forward(self, input_0):
        primals_1 = self.c_qkv.weight
        primals_2 = self.c_qkv.bias
        primals_4 = self.c_proj.weight
        primals_5 = self.c_proj.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
johnpaulbin/glide-text2im
MultiheadAttention
false
12,638
[ "MIT" ]
0
4897050c4c540316dfb1ec7e6ff95698bcb20487
https://github.com/johnpaulbin/glide-text2im/tree/4897050c4c540316dfb1ec7e6ff95698bcb20487
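Aside (illustrative, not part of the dataset record): scaling q and k by attn_ch**-0.25 each, as QKVMultiheadAttention does above, is numerically equivalent to the usual single 1/sqrt(attn_ch) factor on the attention logits, while keeping intermediate magnitudes smaller (helpful in fp16).

import math
import torch

q, k = torch.rand(4, 8), torch.rand(4, 8)
attn_ch = q.shape[-1]
s = 1 / math.sqrt(math.sqrt(attn_ch))
split = (q * s) @ (k * s).T              # scale applied to both operands
once = (q @ k.T) / math.sqrt(attn_ch)    # standard scaled dot-product
print(torch.allclose(split, once, atol=1e-6))  # True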
Net
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 32 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 50176
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 12544
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14
    x1 = xindex // 14
    x4 = xindex
    x3 = xindex // 3136
    x5 = xindex % 3136
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x4, tmp6, xmask)
    tl.store(out_ptr1 + (x5 + 3200 * x3), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 12800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 100 % 32
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 3200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)


@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 480
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 120
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 336
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 84
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
    assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (120, 800), (800, 1))
    assert_size_stride(primals_7, (120,), (1,))
    assert_size_stride(primals_8, (84, 120), (120, 1))
    assert_size_stride(primals_9, (84,), (1,))
    assert_size_stride(primals_10, (10, 84), (84, 1))
    assert_size_stride(primals_11, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(50176)](buf1, primals_2,
            50176, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 16, 14, 14), (3136, 196, 14, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 16, 14, 14), (3200, 196, 14, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(12544)](buf1, buf2,
            buf3, 12544, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 10, 10), (3200, 100, 10, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(12800)](buf5, primals_5,
            12800, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_3[grid(3200)](buf5, buf6,
            buf7, 3200, XBLOCK=256, num_warps=4, num_stages=1)
        buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (4, 800), (800, 1), 0),
            reinterpret_tensor(primals_6, (800, 120), (1, 800), 0), out=buf8)
        buf9 = buf8
        del buf8
        triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_7
        buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
        extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84),
            (1, 120), 0), out=buf10)
        buf11 = buf10
        del buf10
        triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_9
        buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
            primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12)
        del primals_11
    return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
        buf6, reinterpret_tensor(buf7, (4, 800), (800, 1), 0), buf9, buf11,
        primals_10, primals_8, primals_6)


class NetNew(nn.Module):

    def __init__(self):
        super(NetNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.fc1.weight
        primals_7 = self.fc1.bias
        primals_8 = self.fc2.weight
        primals_9 = self.fc2.bias
        primals_10 = self.fc3.weight
        primals_11 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
juliowaissman/cifar10-jwv
Net
false
12,639
[ "MIT" ]
0
a279ccf51f0e8cbacfcc34a9eee381c16ae536fc
https://github.com/juliowaissman/cifar10-jwv/tree/a279ccf51f0e8cbacfcc34a9eee381c16ae536fc
Disc
import torch
import torch.nn as nn
import torch.nn.functional as F


class Disc(nn.Module):

    def __init__(self, N, z_dim):
        super(Disc, self).__init__()
        self.lin1 = nn.Linear(z_dim, N)
        self.lin2 = nn.Linear(N, N)
        self.lin3 = nn.Linear(N, 1)

    def forward(self, x):
        x = F.dropout(self.lin1(x), p=0.2, training=self.training)
        x = F.relu(x)
        x = F.dropout(self.lin2(x), p=0.2, training=self.training)
        x = F.relu(x)
        return F.sigmoid(self.lin3(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'N': 4, 'z_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (1, 4), (4, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
            primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf4
        triton_poi_fused_sigmoid_1[grid(64)](buf5, primals_7, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_7
    return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
        buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7)


class DiscNew(nn.Module):

    def __init__(self, N, z_dim):
        super(DiscNew, self).__init__()
        self.lin1 = nn.Linear(z_dim, N)
        self.lin2 = nn.Linear(N, N)
        self.lin3 = nn.Linear(N, 1)

    def forward(self, input_0):
        primals_1 = self.lin1.weight
        primals_2 = self.lin1.bias
        primals_4 = self.lin2.weight
        primals_5 = self.lin2.bias
        primals_6 = self.lin3.weight
        primals_7 = self.lin3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
junhahyung/Pytorch-Sketch-RNN
Disc
false
12,640
[ "MIT" ]
0
7aa82755fdfdb9bd36f8a83f1cfc0ade43e50a7a
https://github.com/junhahyung/Pytorch-Sketch-RNN/tree/7aa82755fdfdb9bd36f8a83f1cfc0ade43e50a7a
GumbelSoftMaxSampler
import torch
from torch.nn import functional as F
from torch import nn
from typing import *


class GumbelSoftMaxSampler(nn.Module):

    def __init__(self, hard=False):
        super().__init__()
        self.hard = hard

    def forward(self, logits):
        return F.gumbel_softmax(logits=logits, hard=self.hard)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_add_log_neg_0(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.log(tmp1)
    tmp3 = -tmp2
    tmp4 = tmp0 + tmp3
    tmp5 = 1.0
    tmp6 = tmp4 * tmp5
    tmp9 = tl_math.log(tmp8)
    tmp10 = -tmp9
    tmp11 = tmp7 + tmp10
    tmp12 = tmp11 * tmp5
    tmp13 = triton_helpers.maximum(tmp6, tmp12)
    tmp16 = tl_math.log(tmp15)
    tmp17 = -tmp16
    tmp18 = tmp14 + tmp17
    tmp19 = tmp18 * tmp5
    tmp20 = triton_helpers.maximum(tmp13, tmp19)
    tmp23 = tl_math.log(tmp22)
    tmp24 = -tmp23
    tmp25 = tmp21 + tmp24
    tmp26 = tmp25 * tmp5
    tmp27 = triton_helpers.maximum(tmp20, tmp26)
    tmp28 = tmp6 - tmp27
    tmp29 = tmp28 * tmp5
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp12 - tmp27
    tmp32 = tmp31 * tmp5
    tmp33 = tl_math.exp(tmp32)
    tmp34 = tmp30 + tmp33
    tmp35 = tmp19 - tmp27
    tmp36 = tmp35 * tmp5
    tmp37 = tl_math.exp(tmp36)
    tmp38 = tmp34 + tmp37
    tmp39 = tmp26 - tmp27
    tmp40 = tmp39 * tmp5
    tmp41 = tl_math.exp(tmp40)
    tmp42 = tmp38 + tmp41
    tl.store(out_ptr0 + x0, tmp27, xmask)
    tl.store(out_ptr1 + x0, tmp42, xmask)


@triton.jit
def triton_poi_fused__softmax_add_log_neg_1(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl_math.log(tmp1)
    tmp3 = -tmp2
    tmp4 = tmp0 + tmp3
    tmp5 = 1.0
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 - tmp7
    tmp9 = tmp8 * tmp5
    tmp10 = tl_math.exp(tmp9)
    tmp12 = tmp10 / tmp11
    tl.store(in_out_ptr0 + x2, tmp12, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = torch.ops.aten.exponential.default(buf0)
        del buf0
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_add_log_neg_0[grid(64)](arg0_1, buf2,
            buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = buf2
        del buf2
        triton_poi_fused__softmax_add_log_neg_1[grid(256)](buf5, arg0_1,
            buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del buf3
        del buf4
    return buf5,


class GumbelSoftMaxSamplerNew(nn.Module):

    def __init__(self, hard=False):
        super().__init__()
        self.hard = hard

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
jvrana/deep-learning-guides
GumbelSoftMaxSampler
false
12,641
[ "MIT" ]
0
18b7a0808073dd7b345e7a683dd7ee89e97e47ce
https://github.com/jvrana/deep-learning-guides/tree/18b7a0808073dd7b345e7a683dd7ee89e97e47ce
Gaussian
import torch
from torch import Tensor
import torch.utils.tensorboard
import torch.utils.data


class Gaussian(torch.nn.Module):
    """Gaussian activation"""

    def forward(self, x: 'Tensor') -> Tensor:
        return torch.exp(-x * x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.tensorboard
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -tmp0
    tmp2 = tmp1 * tmp0
    tmp3 = tl_math.exp(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_mul_neg_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class GaussianNew(torch.nn.Module):
    """Gaussian activation"""

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
isayev/torchani
Gaussian
false
12,642
[ "MIT" ]
0
f8edffe384e2cb2eebe3a7e04faa01b6f5e26b37
https://github.com/isayev/torchani/tree/f8edffe384e2cb2eebe3a7e04faa01b6f5e26b37
NodeAdaptiveEncoder
import torch
import torch.utils.data
import torch.nn as nn
import torch as torch


class NodeAdaptiveEncoder(nn.Module):

    def __init__(self, num_features, dropout=0.5):
        super(NodeAdaptiveEncoder, self).__init__()
        self.fc = nn.Parameter(torch.zeros(size=(num_features, 1)))
        nn.init.xavier_normal_(self.fc.data, gain=1.414)
        self.bf = nn.Parameter(torch.zeros(size=(1,)))
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, x):
        h = torch.mm(x, self.fc) + self.bf
        h = torch.sigmoid(h)
        h = self.dropout(h)
        return torch.where(x < 0, torch.zeros_like(x), x) + h * torch.where(
            x > 0, torch.zeros_like(x), x)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
import torch as torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp1 = 0.0
    tmp2 = tmp0 < tmp1
    tmp3 = tl.where(tmp2, tmp1, tmp0)
    tmp7 = tmp4 + tmp6
    tmp8 = tl.sigmoid(tmp7)
    tmp9 = tmp0 > tmp1
    tmp10 = tl.where(tmp9, tmp1, tmp0)
    tmp11 = tmp8 * tmp10
    tmp12 = tmp3 + tmp11
    tl.store(out_ptr0 + x2, tmp12, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1), (1, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_gt_lt_mul_sigmoid_where_zeros_like_0[grid(16)](
            primals_2, buf0, primals_3, buf1, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
    return buf1, primals_2, primals_3, buf0


class NodeAdaptiveEncoderNew(nn.Module):

    def __init__(self, num_features, dropout=0.5):
        super(NodeAdaptiveEncoderNew, self).__init__()
        self.fc = nn.Parameter(torch.zeros(size=(num_features, 1)))
        nn.init.xavier_normal_(self.fc.data, gain=1.414)
        self.bf = nn.Parameter(torch.zeros(size=(1,)))
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, input_0):
        primals_1 = self.fc
        primals_3 = self.bf
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ckhui/cogdl
NodeAdaptiveEncoder
false
12,643
[ "MIT" ]
0
93bea17c2dc7084857cd0a4af8178c174965127c
https://github.com/ckhui/cogdl/tree/93bea17c2dc7084857cd0a4af8178c174965127c
InvertibleMultiHeadFlow
import torch
from typing import Dict
from typing import Tuple
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F


class Flow(nn.Module):
    """
    Normalizing Flow base class
    """
    _registry = dict()

    def __init__(self, inverse):
        super(Flow, self).__init__()
        self.inverse = inverse

    def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            *input: input [batch, *input_size]

        Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch]
            out, the output of the flow
            logdet, the log determinant of :math:`\\partial output / \\partial input`
        """
        raise NotImplementedError

    def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            *input: input [batch, *input_size]

        Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch]
            out, the output of the flow
            logdet, the log determinant of :math:`\\partial output / \\partial input`
        """
        raise NotImplementedError

    def init(self, *input, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        raise NotImplementedError

    def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0,
            **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            x: Tensor
                The random variable before flow
            h: list of object
                other conditional inputs
            init: bool
                perform initialization or not (default: False)
            init_scale: float
                initial scale (default: 1.0)

        Returns: y: Tensor, logdet: Tensor
            y, the random variable after flow
            logdet, the log determinant of :math:`\\partial y / \\partial x`
            Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet`
        """
        if self.inverse:
            if init:
                raise RuntimeError(
                    'inverse flow should be initialized with backward pass')
            else:
                return self.backward(x, *h, **kwargs)
        elif init:
            return self.init(x, *h, init_scale=init_scale, **kwargs)
        else:
            return self.forward(x, *h, **kwargs)

    def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0,
            **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            y: Tensor
                The random variable after flow
            h: list of object
                other conditional inputs
            init: bool
                perform initialization or not (default: False)
            init_scale: float
                initial scale (default: 1.0)

        Returns: x: Tensor, logdet: Tensor
            x, the random variable before flow
            logdet, the log determinant of :math:`\\partial x / \\partial y`
            Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet`
        """
        if self.inverse:
            if init:
                return self.init(y, *h, init_scale=init_scale, **kwargs)
            else:
                return self.forward(y, *h, **kwargs)
        elif init:
            raise RuntimeError(
                'forward flow should be initialized with forward pass')
        else:
            return self.backward(y, *h, **kwargs)

    @classmethod
    def register(cls, name: 'str'):
        Flow._registry[name] = cls

    @classmethod
    def by_name(cls, name: 'str'):
        return Flow._registry[name]

    @classmethod
    def from_params(cls, params: 'Dict'):
        raise NotImplementedError


class InvertibleMultiHeadFlow(Flow):

    @staticmethod
    def _get_heads(in_features):
        units = [32, 16, 8]
        for unit in units:
            if in_features % unit == 0:
                return in_features // unit
        assert in_features < 8, 'features={}'.format(in_features)
        return 1

    def __init__(self, in_features, heads=None, type='A', inverse=False):
        super(InvertibleMultiHeadFlow, self).__init__(inverse)
        self.in_features = in_features
        if heads is None:
            heads = InvertibleMultiHeadFlow._get_heads(in_features)
        self.heads = heads
        self.type = type
        assert in_features % heads == 0, 'features ({}) should be divided by heads ({})'.format(in_features, heads)
        assert type in ['A', 'B'], 'type should belong to [A, B]'
        self.weight = Parameter(torch.Tensor(in_features // heads, in_features // heads))
        self.register_buffer('weight_inv', self.weight.data.clone())
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.orthogonal_(self.weight)
        self.sync()

    def sync(self):
        self.weight_inv.copy_(self.weight.data.inverse())

    def forward(self, input: 'torch.Tensor', mask: 'torch.Tensor') -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            input: Tensor
                input tensor [batch, N1, N2, ..., Nl, in_features]
            mask: Tensor
                mask tensor [batch, N1, N2, ..., Nl]

        Returns: out: Tensor, logdet: Tensor
            out: [batch, N1, N2, ..., in_features], the output of the flow
            logdet: [batch], the log determinant of :math:`\\partial output / \\partial input`
        """
        size = input.size()
        dim = input.dim()
        if self.type == 'A':
            out = input.view(*size[:-1], self.heads, self.in_features // self.heads)
        else:
            out = input.view(*size[:-1], self.in_features // self.heads,
                self.heads).transpose(-2, -1)
        out = F.linear(out, self.weight)
        if self.type == 'B':
            out = out.transpose(-2, -1).contiguous()
        out = out.view(*size)
        _, logdet = torch.linalg.slogdet(self.weight)
        if dim > 2:
            num = mask.view(size[0], -1).sum(dim=1) * self.heads
            logdet = logdet * num
        return out, logdet

    def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            input: Tensor
                input tensor [batch, N1, N2, ..., Nl, in_features]
            mask: Tensor
                mask tensor [batch, N1, N2, ..., Nl]

        Returns: out: Tensor, logdet: Tensor
            out: [batch, N1, N2, ..., in_features], the output of the flow
            logdet: [batch], the log determinant of :math:`\\partial output / \\partial input`
        """
        size = input.size()
        dim = input.dim()
        if self.type == 'A':
            out = input.view(*size[:-1], self.heads, self.in_features // self.heads)
        else:
            out = input.view(*size[:-1], self.in_features // self.heads,
                self.heads).transpose(-2, -1)
        out = F.linear(out, self.weight_inv)
        if self.type == 'B':
            out = out.transpose(-2, -1).contiguous()
        out = out.view(*size)
        _, logdet = torch.linalg.slogdet(self.weight_inv)
        if dim > 2:
            num = mask.view(size[0], -1).sum(dim=1) * self.heads
            logdet = logdet * num
        return out, logdet

    def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
        with torch.no_grad():
            return self.forward(data, mask)

    def extra_repr(self):
        return 'inverse={}, in_features={}, heads={}, type={}'.format(
            self.inverse, self.in_features, self.heads, self.type)

    @classmethod
    def from_params(cls, params: 'Dict') -> 'InvertibleMultiHeadFlow':
        return InvertibleMultiHeadFlow(**params)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Dict
from typing import Tuple
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp7 = tl.load(in_ptr1 + 0)
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK, 1])
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 1.0
    tmp6 = tmp4 * tmp5
    tmp9 = tmp8 * tmp6
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        buf1 = torch.ops.aten._linalg_slogdet.default(primals_2)
        del primals_2
        buf3 = buf1[1]
        buf4 = buf1[2]
        buf5 = buf1[3]
        del buf1
        buf6 = empty_strided_cuda((4,), (1,), torch.float32)
        buf7 = buf6
        del buf6
        buf8 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mul_sum_0[grid(4)](buf7, primals_3, buf3, buf8, 4,
            64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf3
        del primals_3
    return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf8,
        reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf4, buf5, buf7)


class Flow(nn.Module):
    """
    Normalizing Flow base class
    """
    _registry = dict()

    def __init__(self, inverse):
        super(Flow, self).__init__()
        self.inverse = inverse

    def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            *input: input [batch, *input_size]

        Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch]
            out, the output of the flow
            logdet, the log determinant of :math:`\\partial output / \\partial input`
        """
        raise NotImplementedError

    def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            *input: input [batch, *input_size]

        Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch]
            out, the output of the flow
            logdet, the log determinant of :math:`\\partial output / \\partial input`
        """
        raise NotImplementedError

    def init(self, *input, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        raise NotImplementedError

    def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0,
            **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            x: Tensor
                The random variable before flow
            h: list of object
                other conditional inputs
            init: bool
                perform initialization or not (default: False)
            init_scale: float
                initial scale (default: 1.0)

        Returns: y: Tensor, logdet: Tensor
            y, the random variable after flow
            logdet, the log determinant of :math:`\\partial y / \\partial x`
            Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet`
        """
        if self.inverse:
            if init:
                raise RuntimeError(
                    'inverse flow should be initialized with backward pass')
            else:
                return self.backward(x, *h, **kwargs)
        elif init:
            return self.init(x, *h, init_scale=init_scale, **kwargs)
        else:
            return self.forward(x, *h, **kwargs)

    def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0,
            **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            y: Tensor
                The random variable after flow
            h: list of object
                other conditional inputs
            init: bool
                perform initialization or not (default: False)
            init_scale: float
                initial scale (default: 1.0)

        Returns: x: Tensor, logdet: Tensor
            x, the random variable before flow
            logdet, the log determinant of :math:`\\partial x / \\partial y`
            Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet`
        """
        if self.inverse:
            if init:
                return self.init(y, *h, init_scale=init_scale, **kwargs)
            else:
                return self.forward(y, *h, **kwargs)
        elif init:
            raise RuntimeError(
                'forward flow should be initialized with forward pass')
        else:
            return self.backward(y, *h, **kwargs)

    @classmethod
    def register(cls, name: 'str'):
        Flow._registry[name] = cls

    @classmethod
    def by_name(cls, name: 'str'):
        return Flow._registry[name]

    @classmethod
    def from_params(cls, params: 'Dict'):
        raise NotImplementedError


class InvertibleMultiHeadFlowNew(Flow):

    @staticmethod
    def _get_heads(in_features):
        units = [32, 16, 8]
        for unit in units:
            if in_features % unit == 0:
                return in_features // unit
        assert in_features < 8, 'features={}'.format(in_features)
        return 1

    def __init__(self, in_features, heads=None, type='A', inverse=False):
        super(InvertibleMultiHeadFlowNew, self).__init__(inverse)
        self.in_features = in_features
        if heads is None:
            heads = InvertibleMultiHeadFlowNew._get_heads(in_features)
        self.heads = heads
        self.type = type
        assert in_features % heads == 0, 'features ({}) should be divided by heads ({})'.format(in_features, heads)
        assert type in ['A', 'B'], 'type should belong to [A, B]'
        self.weight = Parameter(torch.Tensor(in_features // heads, in_features // heads))
        self.register_buffer('weight_inv', self.weight.data.clone())
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.orthogonal_(self.weight)
        self.sync()

    def sync(self):
        self.weight_inv.copy_(self.weight.data.inverse())

    def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            input: Tensor
                input tensor [batch, N1, N2, ..., Nl, in_features]
            mask: Tensor
                mask tensor [batch, N1, N2, ..., Nl]

        Returns: out: Tensor, logdet: Tensor
            out: [batch, N1, N2, ..., in_features], the output of the flow
            logdet: [batch], the log determinant of :math:`\\partial output / \\partial input`
        """
        size = input.size()
        dim = input.dim()
        if self.type == 'A':
            out = input.view(*size[:-1], self.heads, self.in_features // self.heads)
        else:
            out = input.view(*size[:-1], self.in_features // self.heads,
                self.heads).transpose(-2, -1)
        out = F.linear(out, self.weight_inv)
        if self.type == 'B':
            out = out.transpose(-2, -1).contiguous()
        out = out.view(*size)
        _, logdet = torch.linalg.slogdet(self.weight_inv)
        if dim > 2:
            num = mask.view(size[0], -1).sum(dim=1) * self.heads
            logdet = logdet * num
        return out, logdet

    def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
        with torch.no_grad():
            return self.forward(data, mask)

    def extra_repr(self):
        return 'inverse={}, in_features={}, heads={}, type={}'.format(
            self.inverse, self.in_features, self.heads, self.type)

    @classmethod
    def from_params(cls, params: 'Dict') -> 'InvertibleMultiHeadFlow':
        return InvertibleMultiHeadFlowNew(**params)

    def forward(self, input_0, input_1):
        primals_2 = self.weight
        primals_1 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
juheeuu/flowseq
InvertibleMultiHeadFlow
false
12,644
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
is_she_mad
import torch
import torch.nn.functional as F
import torch.nn as nn


class is_she_mad(nn.Module):

    def __init__(self, modality_size):
        super(is_she_mad, self).__init__()
        self.fc1 = nn.Linear(modality_size, 200)
        self.fc2 = nn.Linear(200, 128)
        self.fc3 = nn.Linear(128, 1)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = F.relu(out)
        out = self.fc3(out)
        out = F.sigmoid(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'modality_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 12800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 200
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (200, 4), (4, 1))
    assert_size_stride(primals_2, (200,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (128, 200), (200, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (1, 128), (128, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(12800)](buf1,
            primals_2, buf7, 12800, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0),
            reinterpret_tensor(primals_4, (200, 128), (1, 200), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
            primals_5, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
            reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf4
        triton_poi_fused_sigmoid_2[grid(64)](buf5, primals_7, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_7
    return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 200), (200, 1), 0),
        reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
        buf5, primals_6, buf6, primals_4, buf7)


class is_she_madNew(nn.Module):

    def __init__(self, modality_size):
        super(is_she_madNew, self).__init__()
        self.fc1 = nn.Linear(modality_size, 200)
        self.fc2 = nn.Linear(200, 128)
        self.fc3 = nn.Linear(128, 1)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
jryzkns/IsSheMadAtMe
is_she_mad
false
12,645
[ "MIT" ]
0
7776fb9730dab56f42418460efa0c2dec3988e46
https://github.com/jryzkns/IsSheMadAtMe/tree/7776fb9730dab56f42418460efa0c2dec3988e46
ResidualAttentionBlock
import math
import torch
import torch.nn as nn
import torch as th


class LayerNorm(nn.LayerNorm):
    """
    Implementation that supports fp16 inputs but fp32 gains/biases.
    """

    def forward(self, x: 'th.Tensor'):
        return super().forward(x.float())


class QKVMultiheadAttention(nn.Module):

    def __init__(self, n_heads: 'int', n_ctx: 'int'):
        super().__init__()
        self.n_heads = n_heads
        self.n_ctx = n_ctx

    def forward(self, qkv):
        bs, n_ctx, width = qkv.shape
        attn_ch = width // self.n_heads // 3
        scale = 1 / math.sqrt(math.sqrt(attn_ch))
        qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
        q, k, v = th.split(qkv, attn_ch, dim=-1)
        weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
        wdtype = weight.dtype
        weight = th.softmax(weight.float(), dim=-1).type(wdtype)
        return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)


class MultiheadAttention(nn.Module):

    def __init__(self, n_ctx, width, heads):
        super().__init__()
        self.n_ctx = n_ctx
        self.width = width
        self.heads = heads
        self.c_qkv = nn.Linear(width, width * 3)
        self.c_proj = nn.Linear(width, width)
        self.attention = QKVMultiheadAttention(heads, n_ctx)

    def forward(self, x):
        x = self.c_qkv(x)
        x = self.attention(x)
        x = self.c_proj(x)
        return x


class MLP(nn.Module):

    def __init__(self, width):
        super().__init__()
        self.width = width
        self.c_fc = nn.Linear(width, width * 4)
        self.c_proj = nn.Linear(width * 4, width)
        self.gelu = nn.GELU()

    def forward(self, x):
        return self.c_proj(self.gelu(self.c_fc(x)))


class ResidualAttentionBlock(nn.Module):

    def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
        super().__init__()
        self.attn = MultiheadAttention(n_ctx, width, heads)
        self.ln_1 = LayerNorm(width)
        self.mlp = MLP(width)
        self.ln_2 = LayerNorm(width)

    def forward(self, x: 'th.Tensor'):
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_ctx': 4, 'width': 4, 'heads': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused__softmax_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    x3 = xindex // 16
    tmp0 = tl.load(in_ptr0 + 3 * x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + (x0 + 16 * x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x3), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (8 + x0 + 16 * x3), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr2 + (12 + x0 + 16 * x3), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp4 * tmp7
    tmp9 = triton_helpers.maximum(tmp6, tmp8)
    tmp11 = tmp4 * tmp10
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp14 = tmp4 * tmp13
    tmp15 = triton_helpers.maximum(tmp12, tmp14)
    tmp16 = tmp6 - tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp8 - tmp15
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp11 - tmp15
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp14 - tmp15
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp23 + tmp25
    tl.store(out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr1 + x4, tmp15, xmask)
    tl.store(out_ptr2 + x4, tmp26, xmask)


@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex // 4
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x3 = xindex // 64
    x2 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp7 = tmp5 / tmp6
    tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp7, xmask)


@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (2 + 3 * x1 + 12 * x0 + 48 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 0)
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    tmp4 = tmp1 + tmp3
    tmp5 = tmp0 + tmp4
    tmp10 = tmp7 + tmp9
    tmp11 = tmp6 + tmp10
    tmp12 = tmp5 + tmp11
    tmp17 = tmp14 + tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = tmp12 + tmp18
    tmp24 = tmp21 + tmp23
    tmp25 = tmp20 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp6 = tmp4 - tmp5
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp11 = tmp6 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


@triton.jit
def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_out_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp4 + tmp7
    tl.store(in_out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (12, 4), (4, 1))
    assert_size_stride(primals_5, (12,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (16, 4), (4, 1))
    assert_size_stride(primals_11, (16,), (1,))
    assert_size_stride(primals_12, (4, 16), (16, 1))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0,
            buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0,
            buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_2
        del primals_3
        buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
        buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_mul_2[grid(64)](buf3, primals_5, buf5, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
        triton_poi_fused__softmax_mul_3[grid(64)](buf3, primals_5, buf5,
            buf4, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_4[grid(256)](buf4, buf5, buf6, buf7,
            buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
        del buf7
        triton_poi_fused_clone_5[grid(64)](buf3, primals_5, buf9, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del buf3
        del primals_5
        buf10 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0)
        del buf6
        extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
        buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_6[grid(16, 4)](buf10, buf11, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
        del buf10
        extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf12)
        buf13 = buf1
        del buf1
        buf14 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1,
            buf12, primals_7, buf13, buf14, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_1,
            buf12, primals_7, buf13, buf14, primals_8, primals_9, buf15,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf13
        del buf14
        del primals_9
        buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf16)
        del primals_11
        buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_gelu_9[grid(256)](buf16, buf17, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
            reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf18)
        buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
        del buf18
        triton_poi_fused_add_10[grid(64)](buf19, primals_1, buf12,
            primals_7, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_13
    return (buf19, primals_1, primals_7, primals_8, reinterpret_tensor(buf2,
        (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16,
        1, 4, 1, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 4, 1), (16, 1,
        1, 4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
        buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16,
        reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_12,
        primals_10, primals_6, reinterpret_tensor(buf9, (16, 1, 4), (4, 1,
        1), 0), primals_4)


class LayerNorm(nn.LayerNorm):
    """
    Implementation that supports fp16 inputs but fp32 gains/biases.
    """

    def forward(self, x: 'th.Tensor'):
        return super().forward(x.float())


class QKVMultiheadAttention(nn.Module):

    def __init__(self, n_heads: 'int', n_ctx: 'int'):
        super().__init__()
        self.n_heads = n_heads
        self.n_ctx = n_ctx

    def forward(self, qkv):
        bs, n_ctx, width = qkv.shape
        attn_ch = width // self.n_heads // 3
        scale = 1 / math.sqrt(math.sqrt(attn_ch))
        qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
        q, k, v = th.split(qkv, attn_ch, dim=-1)
        weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
        wdtype = weight.dtype
        weight = th.softmax(weight.float(), dim=-1).type(wdtype)
        return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)


class MultiheadAttention(nn.Module):

    def __init__(self, n_ctx, width, heads):
        super().__init__()
        self.n_ctx = n_ctx
        self.width = width
        self.heads = heads
        self.c_qkv = nn.Linear(width, width * 3)
        self.c_proj = nn.Linear(width, width)
        self.attention = QKVMultiheadAttention(heads, n_ctx)

    def forward(self, x):
        x = self.c_qkv(x)
        x = self.attention(x)
        x = self.c_proj(x)
        return x


class MLP(nn.Module):

    def __init__(self, width):
        super().__init__()
        self.width = width
        self.c_fc = nn.Linear(width, width * 4)
        self.c_proj = nn.Linear(width * 4, width)
        self.gelu = nn.GELU()

    def forward(self, x):
        return self.c_proj(self.gelu(self.c_fc(x)))


class ResidualAttentionBlockNew(nn.Module):

    def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
        super().__init__()
        self.attn = MultiheadAttention(n_ctx, width, heads)
        self.ln_1 = LayerNorm(width)
        self.mlp = MLP(width)
        self.ln_2 = LayerNorm(width)

    def forward(self, input_0):
        primals_4 = self.attn.c_qkv.weight
        primals_5 = self.attn.c_qkv.bias
        primals_6 = self.attn.c_proj.weight
        primals_2 = self.attn.c_proj.bias
        primals_3 = self.ln_1.weight
        primals_7 = self.ln_1.bias
        primals_10 = self.mlp.c_fc.weight
        primals_11 = self.mlp.c_fc.bias
        primals_12 = self.mlp.c_proj.weight
        primals_8 = self.mlp.c_proj.bias
        primals_9 = self.ln_2.weight
        primals_13 = self.ln_2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
johnpaulbin/glide-text2im
ResidualAttentionBlock
false
12,646
[ "MIT" ]
0
4897050c4c540316dfb1ec7e6ff95698bcb20487
https://github.com/johnpaulbin/glide-text2im/tree/4897050c4c540316dfb1ec7e6ff95698bcb20487
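Each record in this corpus pairs an eager module with a Triton-backed `...New` counterpart that routes the same parameters through a generated `call` function, so the pairs can be parity-checked mechanically. A minimal harness sketch, assuming a CUDA device (the generated `call` functions pin CUDA device 0), that a record's two classes and its `get_inputs`/`get_init_inputs` helpers are in scope, and with arbitrarily chosen tolerances:

import torch

def check_record(eager_cls, triton_cls, get_inputs, get_init_inputs):
    # Instantiate both variants from the record's recorded constructor arguments.
    init_args, init_kwargs = get_init_inputs()
    eager = eager_cls(*init_args, **init_kwargs).cuda()
    fused = triton_cls(*init_args, **init_kwargs).cuda()
    fused.load_state_dict(eager.state_dict())  # share identical parameters
    inputs = [t.cuda() for t in get_inputs()]
    # The fused forward should reproduce the eager forward numerically.
    torch.testing.assert_close(fused(*inputs), eager(*inputs), rtol=1e-4, atol=1e-4)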
DistMultLayer
import torch import torch.utils.data import torch.nn as nn import torch as torch class DistMultLayer(nn.Module): def __init__(self): super(DistMultLayer, self).__init__() def forward(self, sub_emb, obj_emb, rel_emb): return torch.sum(sub_emb * obj_emb * rel_emb, dim=-1) def predict(self, sub_emb, obj_emb, rel_emb): return torch.matmul(sub_emb * rel_emb, obj_emb.t()) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn import torch as torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp7 = tmp5 * tmp6 tmp9 = tmp7 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 * tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 * tmp18 tmp21 = tmp19 * tmp20 tmp22 = tmp16 + tmp21 tl.store(out_ptr0 + x0, tmp22, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class DistMultLayerNew(nn.Module): def __init__(self): super(DistMultLayerNew, self).__init__() def predict(self, sub_emb, obj_emb, rel_emb): return torch.matmul(sub_emb * rel_emb, obj_emb.t()) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ckhui/cogdl
DistMultLayer
false
12,647
[ "MIT" ]
0
93bea17c2dc7084857cd0a4af8178c174965127c
https://github.com/ckhui/cogdl/tree/93bea17c2dc7084857cd0a4af8178c174965127c
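The fused `triton_poi_fused_mul_sum_0` kernel above simply unrolls the length-4 reduction of the DistMult triple score. A small sketch of the two scoring paths, with shapes matching `get_inputs` above and otherwise illustrative names:

import torch

sub, obj, rel = (torch.rand(4, 4, 4, 4) for _ in range(3))
score = torch.sum(sub * obj * rel, dim=-1)  # what forward() computes
assert score.shape == (4, 4, 4)
# predict() scores each (subject, relation) row against every object row:
sub2, rel2, objs = torch.rand(2, 4), torch.rand(2, 4), torch.rand(5, 4)
all_scores = torch.matmul(sub2 * rel2, objs.t())  # shape (2, 5)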
RobertaClassificationHead
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, 2) def forward(self, features, **kwargs): x = features[:, 0, :] x = x.reshape(-1, x.size(-1) * 2) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 8), (8, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((8, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (8, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 4), (1, 8), 0), out=buf1) del primals_2 buf2 = buf1 del buf1 triton_poi_fused_tanh_1[grid(32)](buf2, primals_3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((8, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return buf3, reinterpret_tensor(buf0, (8, 8), (8, 1), 0), buf2, primals_4 class RobertaClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, 2) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
kamranazmat/CodeBERT
RobertaClassificationHead
false
12,648
[ "MIT" ]
0
109c1b58b96c61314a76563c6bd686bb09f86eab
https://github.com/kamranazmat/CodeBERT/tree/109c1b58b96c61314a76563c6bd686bb09f86eab
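One detail of this record worth spelling out is the shape flow, because the `reshape(-1, hidden_size * 2)` changes the leading dimension. A sketch with the `hidden_size=4` configuration from `get_init_inputs`:

import torch

features = torch.rand(4, 4, 4, 4)
x = features[:, 0, :]              # (4, 4, 4): the slice the clone kernel materializes
x = x.reshape(-1, x.size(-1) * 2)  # (8, 8): adjacent hidden vectors are paired up
assert x.shape == (8, 8)
# dense maps 8 -> 4, tanh is applied, out_proj maps 4 -> 2: (8, 2) logits overall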
InvertibleLinearFlow
import torch from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow should be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialized with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class InvertibleLinearFlow(Flow): def __init__(self, in_features, inverse=False): super(InvertibleLinearFlow, self).__init__(inverse) self.in_features = in_features self.weight = Parameter(torch.Tensor(in_features, in_features)) self.register_buffer('weight_inv', self.weight.data.clone()) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.weight) self.sync() def sync(self): self.weight_inv.copy_(self.weight.data.inverse()) def forward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ...,
in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = F.linear(input, self.weight) _, logdet = torch.linalg.slogdet(self.weight) if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = F.linear(input, self.weight_inv) _, logdet = torch.linalg.slogdet(self.weight_inv) if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: with torch.no_grad(): return self.forward(data, mask) def extra_repr(self): return 'inverse={}, in_features={}'.format(self.inverse, self. in_features) @classmethod def from_params(cls, params: 'Dict') ->'InvertibleLinearFlow': return InvertibleLinearFlow(**params) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp7 = tmp6 * tmp4 tl.store(out_ptr1 + x0, tmp7, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = torch.ops.aten._linalg_slogdet.default(primals_2) del primals_2 buf3 = buf1[1] buf4 = buf1[2] buf5 = buf1[3] del buf1 buf6 = empty_strided_cuda((4,), (1,), torch.float32) buf7 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](primals_3, buf3, buf6, buf7, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf3 del primals_3 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf7, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf4, buf5, buf6 class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow 
logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow should be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialized with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class InvertibleLinearFlowNew(Flow): def __init__(self, in_features, inverse=False): super(InvertibleLinearFlowNew, self).__init__(inverse) self.in_features = in_features self.weight = Parameter(torch.Tensor(in_features, in_features)) self.register_buffer('weight_inv', self.weight.data.clone()) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.weight) self.sync() def sync(self): self.weight_inv.copy_(self.weight.data.inverse()) def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = F.linear(input, self.weight_inv) _, logdet = torch.linalg.slogdet(self.weight_inv) if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: with torch.no_grad(): return self.forward(data, mask) def extra_repr(self): return 'inverse={}, in_features={}'.format(self.inverse, self. in_features) @classmethod def from_params(cls, params: 'Dict') ->'InvertibleLinearFlow': return InvertibleLinearFlowNew(**params) def forward(self, input_0, input_1): primals_2 = self.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
juheeuu/flowseq
InvertibleLinearFlow
false
12,649
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
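The eager classes in this record make the flow's defining property easy to check: `backward` applies the cached inverse weight, so a forward/backward round trip recovers the input and negates the log-determinant. A sketch (CPU suffices, since only the `New` variant requires the Triton path):

import torch

flow = InvertibleLinearFlow(in_features=4)
x, mask = torch.rand(2, 3, 4), torch.ones(2, 3)
y, logdet = flow.forward(x, mask)
x_rec, logdet_inv = flow.backward(y, mask)
assert torch.allclose(x, x_rec, atol=1e-05)
assert torch.allclose(logdet, -logdet_inv, atol=1e-05)  # log|det W_inv| = -log|det W|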
ActNormFlow
import torch from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow should be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialized with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class ActNormFlow(Flow): def __init__(self, in_features, inverse=False): super(ActNormFlow, self).__init__(inverse) self.in_features = in_features self.log_scale = Parameter(torch.Tensor(in_features)) self.bias = Parameter(torch.Tensor(in_features)) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.log_scale, mean=0, std=0.05) nn.init.constant_(self.bias, 0.0) def forward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output /
\\partial input` """ dim = input.dim() out = input * self.log_scale.exp() + self.bias out = out * mask.unsqueeze(dim - 1) logdet = self.log_scale.sum(dim=0, keepdim=True) if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = (input - self.bias) * mask.unsqueeze(dim - 1) out = out.div(self.log_scale.exp() + 1e-08) logdet = self.log_scale.sum(dim=0, keepdim=True) * -1.0 if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1) logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: data: input: Tensor input tensor [batch, N1, N2, ..., in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] init_scale: float initial scale Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ with torch.no_grad(): out, _ = self.forward(data, mask) mean = out.view(-1, self.in_features).mean(dim=0) std = out.view(-1, self.in_features).std(dim=0) inv_stdv = init_scale / (std + 1e-06) self.log_scale.add_(inv_stdv.log()) self.bias.add_(-mean).mul_(inv_stdv) return self.forward(data, mask) def extra_repr(self): return 'inverse={}, in_features={}'.format(self.inverse, self. in_features) @classmethod def from_params(cls, params: 'Dict') ->'ActNormFlow': return ActNormFlow(**params) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from typing import Dict from typing import Tuple import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x5 = xindex // 16 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x0 + 4 * x5), xmask, eviction_policy='evict_last' ) tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_per_fused_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp7 = tmp6 * tmp4 tl.store(out_ptr1 + x0, tmp7, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_0[grid(1024)](primals_1, primals_2, primals_3, primals_4, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((1,), (1,), torch.float32) triton_per_fused_sum_1[grid(1)](primals_2, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sum_2[grid(4)](primals_4, buf1, buf2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf1 return buf0, buf3, primals_1, primals_2, primals_4, buf2 class Flow(nn.Module): """ Normalizing Flow base class """ _registry = dict() def __init__(self, inverse): super(Flow, self).__init__() 
self.inverse = inverse def forward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def backward(self, *inputs, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: *input: input [batch, *input_size] Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch] out, the output of the flow logdet, the log determinant of :math:`\\partial output / \\partial input` """ raise NotImplementedError def init(self, *input, **kwargs) ->Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError def fwdpass(self, x: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: x: Tensor The random variable before flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: y: Tensor, logdet: Tensor y, the random variable after flow logdet, the log determinant of :math:`\\partial y / \\partial x` Then the density :math:`\\log(p(y)) = \\log(p(x)) - logdet` """ if self.inverse: if init: raise RuntimeError( 'inverse flow should be initialized with backward pass') else: return self.backward(x, *h, **kwargs) elif init: return self.init(x, *h, init_scale=init_scale, **kwargs) else: return self.forward(x, *h, **kwargs) def bwdpass(self, y: 'torch.Tensor', *h, init=False, init_scale=1.0, ** kwargs) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: y: Tensor The random variable after flow h: list of object other conditional inputs init: bool perform initialization or not (default: False) init_scale: float initial scale (default: 1.0) Returns: x: Tensor, logdet: Tensor x, the random variable before flow logdet, the log determinant of :math:`\\partial x / \\partial y` Then the density :math:`\\log(p(y)) = \\log(p(x)) + logdet` """ if self.inverse: if init: return self.init(y, *h, init_scale=init_scale, **kwargs) else: return self.forward(y, *h, **kwargs) elif init: raise RuntimeError( 'forward flow should be initialized with forward pass') else: return self.backward(y, *h, **kwargs) @classmethod def register(cls, name: 'str'): Flow._registry[name] = cls @classmethod def by_name(cls, name: 'str'): return Flow._registry[name] @classmethod def from_params(cls, params: 'Dict'): raise NotImplementedError class ActNormFlowNew(Flow): def __init__(self, in_features, inverse=False): super(ActNormFlowNew, self).__init__(inverse) self.in_features = in_features self.log_scale = Parameter(torch.Tensor(in_features)) self.bias = Parameter(torch.Tensor(in_features)) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.log_scale, mean=0, std=0.05) nn.init.constant_(self.bias, 0.0) def backward(self, input: 'torch.Tensor', mask: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """ Args: input: Tensor input tensor [batch, N1, N2, ..., Nl, in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ dim = input.dim() out = (input - self.bias) * mask.unsqueeze(dim - 1) out = out.div(self.log_scale.exp() + 1e-08) logdet = self.log_scale.sum(dim=0, keepdim=True) * -1.0 if dim > 2: num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num return out, logdet def init(self, data: 'torch.Tensor', mask: 'torch.Tensor', init_scale=1.0 ) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: data: input: Tensor input tensor [batch, N1, N2, ..., in_features] mask: Tensor mask tensor [batch, N1, N2, ...,Nl] init_scale: float initial scale Returns: out: Tensor , logdet: Tensor out: [batch, N1, N2, ..., in_features], the output of the flow logdet: [batch], the log determinant of :math:`\\partial output / \\partial input` """ with torch.no_grad(): out, _ = self.forward(data, mask) mean = out.view(-1, self.in_features).mean(dim=0) std = out.view(-1, self.in_features).std(dim=0) inv_stdv = init_scale / (std + 1e-06) self.log_scale.add_(inv_stdv.log()) self.bias.add_(-mean).mul_(inv_stdv) return self.forward(data, mask) def extra_repr(self): return 'inverse={}, in_features={}'.format(self.inverse, self. in_features) @classmethod def from_params(cls, params: 'Dict') ->'ActNormFlow': return ActNormFlowNew(**params) def forward(self, input_0, input_1): primals_2 = self.log_scale primals_3 = self.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
juheeuu/flowseq
ActNormFlow
false
12,650
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
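The same round-trip check applies to this record: ActNormFlow's `backward` undoes the affine map `x * exp(log_scale) + bias` up to the `1e-08` guard in the divisor. A sketch against the eager class:

import torch

flow = ActNormFlow(in_features=4)
x, mask = torch.rand(2, 3, 4), torch.ones(2, 3)
y, logdet = flow.forward(x, mask)
x_rec, logdet_inv = flow.backward(y, mask)
assert torch.allclose(x, x_rec, atol=1e-04)  # exact up to the epsilon guard
assert torch.allclose(logdet, -logdet_inv)   # both are sum(log_scale) * num, opposite signs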
PrototypicalDecoder
import torch import typing from torch import Tensor from collections import Counter from typing import List from typing import Optional from typing import Union from torch.utils.data import Dataset import torch.utils.data.dataloader from torch import nn import torch.nn from torch.utils.data.dataset import Dataset from torch.utils.data import DataLoader def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) def arccosh(x): """Compute the arcosh, numerically stable.""" x = torch.clamp(x, min=1 + EPSILON) a = torch.log(x) b = torch.log1p(torch.sqrt(x * x - 1) / x) return a + b def _iter_dataset(dataset: 'Optional[Dataset]') ->typing.Iterable: if dataset is None: return [] return map(lambda x: x[0], DataLoader(dataset, batch_size=1, num_workers=0) ) def identify_dynamic_embeddings(data_point: 'DataPoint'): dynamic_embeddings = [] if isinstance(data_point, Sentence): first_token = data_point[0] for name, vector in first_token._embeddings.items(): if vector.requires_grad: dynamic_embeddings.append(name) for name, vector in data_point._embeddings.items(): if vector.requires_grad: dynamic_embeddings.append(name) return dynamic_embeddings def store_embeddings(data_points: 'Union[List[DT], Dataset]', storage_mode: 'str', dynamic_embeddings: 'Optional[List[str]]'=None): if isinstance(data_points, Dataset): data_points = list(_iter_dataset(data_points)) if storage_mode == 'none': dynamic_embeddings = None elif not dynamic_embeddings: dynamic_embeddings = identify_dynamic_embeddings(data_points[0]) for data_point in data_points: data_point.clear_embeddings(dynamic_embeddings) if storage_mode == 'cpu': str(flair.device) != 'cpu' for data_point in data_points: data_point def mdot(x, y): """Compute the inner product.""" m = x.new_ones(1, x.size(1)) m[0, 0] = -1 return torch.sum(m * x * y, 1, keepdim=True) def dist(x, y): """Get the hyperbolic distance between x and y.""" return arccosh(-mdot(x, y)) class CosineDistance(torch.nn.Module): def forward(self, a, b): return -dot_product(a, b, normalize=True) class EuclideanDistance(nn.Module): """Implement a EuclideanDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared euclidean distance between each element in mat_1 and each element in mat_2. Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ _dist = [torch.sum((mat_1 - mat_2[i]) ** 2, dim=1) for i in range( mat_2.size(0))] dist = torch.stack(_dist, dim=1) return dist class HyperbolicDistance(nn.Module): """Implement a HyperbolicDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared hyperbolic distance between each element in mat_1 and each element in mat_2.
Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ mat_1_x_0 = torch.sqrt(1 + mat_1.pow(2).sum(dim=1, keepdim=True)) mat_2_x_0 = torch.sqrt(1 + mat_2.pow(2).sum(dim=1, keepdim=True)) left = mat_1_x_0.mm(mat_2_x_0.t()) right = mat_1[:, 1:].mm(mat_2[:, 1:].t()) return arccosh(left - right).pow(2) class LogitCosineDistance(torch.nn.Module): def forward(self, a, b): return torch.logit(0.5 - 0.5 * dot_product(a, b, normalize=True)) class NegativeScaledDotProduct(torch.nn.Module): def forward(self, a, b): sqrt_d = torch.sqrt(torch.tensor(a.size(-1))) return -dot_product(a, b, normalize=False) / sqrt_d class PrototypicalDecoder(torch.nn.Module): def __init__(self, num_prototypes: 'int', embeddings_size: 'int', prototype_size: 'Optional[int]'=None, distance_function: 'str'= 'euclidean', use_radius: 'Optional[bool]'=False, min_radius: 'Optional[int]'=0, unlabeled_distance: 'Optional[float]'=None, unlabeled_idx: 'Optional[int]'=None, learning_mode: 'Optional[str]' ='joint', normal_distributed_initial_prototypes: 'bool'=False): super().__init__() if not prototype_size: prototype_size = embeddings_size self.prototype_size = prototype_size self.metric_space_decoder: 'Optional[torch.nn.Linear]' = None if prototype_size != embeddings_size: self.metric_space_decoder = torch.nn.Linear(embeddings_size, prototype_size) torch.nn.init.xavier_uniform_(self.metric_space_decoder.weight) self.prototype_vectors = torch.nn.Parameter(torch.ones( num_prototypes, prototype_size), requires_grad=True) if normal_distributed_initial_prototypes: self.prototype_vectors = torch.nn.Parameter(torch.normal(torch. zeros(num_prototypes, prototype_size))) self.prototype_radii: 'Optional[torch.nn.Parameter]' = None if use_radius: self.prototype_radii = torch.nn.Parameter(torch.ones( num_prototypes), requires_grad=True) self.min_radius = min_radius self.learning_mode = learning_mode assert (unlabeled_idx is None) == (unlabeled_distance is None ), "'unlabeled_idx' and 'unlabeled_distance' should either both be set or both not be set." 
self.unlabeled_idx = unlabeled_idx self.unlabeled_distance = unlabeled_distance self._distance_function = distance_function self.distance: 'Optional[torch.nn.Module]' = None if distance_function.lower() == 'hyperbolic': self.distance = HyperbolicDistance() elif distance_function.lower() == 'cosine': self.distance = CosineDistance() elif distance_function.lower() == 'logit_cosine': self.distance = LogitCosineDistance() elif distance_function.lower() == 'euclidean': self.distance = EuclideanDistance() elif distance_function.lower() == 'dot_product': self.distance = NegativeScaledDotProduct() else: raise KeyError(f'Distance function {distance_function} not found.') self @property def num_prototypes(self): return self.prototype_vectors.size(0) def forward(self, embedded): if self.learning_mode == 'learn_only_map_and_prototypes': embedded = embedded.detach() if self.metric_space_decoder is not None: encoded = self.metric_space_decoder(embedded) else: encoded = embedded prot = self.prototype_vectors radii = self.prototype_radii if self.learning_mode == 'learn_only_prototypes': encoded = encoded.detach() if self.learning_mode == 'learn_only_embeddings_and_map': prot = prot.detach() if radii is not None: radii = radii.detach() distance = self.distance(encoded, prot) if radii is not None: distance /= self.min_radius + torch.nn.functional.softplus(radii) if self.unlabeled_distance: distance[..., self.unlabeled_idx] = self.unlabeled_distance scores = -distance return scores def enable_expectation_maximization(self, data: 'FlairDataset', encoder: 'DefaultClassifier', exempt_labels: 'List[str]'=[], mini_batch_size: 'int'=8): """Applies monkey-patch to train method (which sets the train flag). This allows for computation of average prototypes after a training sequence.""" decoder = self unpatched_train = encoder.train def patched_train(mode: 'bool'=True): unpatched_train(mode=mode) if mode: logger.info('recalculating prototypes') with torch.no_grad(): decoder.calculate_prototypes(data=data, encoder=encoder, exempt_labels=exempt_labels, mini_batch_size= mini_batch_size) encoder.train = patched_train def calculate_prototypes(self, data: 'FlairDataset', encoder: 'DefaultClassifier', exempt_labels: 'List[str]'=[], mini_batch_size=32 ): """ Function that calculates a prototype for each class based on the euclidean average embedding over the whole dataset :param data: dataset for which to calculate prototypes :param encoder: encoder to use :param exempt_labels: labels to exclude :param mini_batch_size: number of sentences to embed at the same time :return: """ with torch.no_grad(): dataloader = DataLoader(data, batch_size=mini_batch_size) new_prototypes = torch.zeros(self.num_prototypes, self.
prototype_size, device=flair.device) counter: 'Counter' = Counter() for batch in tqdm(dataloader): logits, labels = encoder.forward_pass(batch) if len(labels) > 0: if self.metric_space_decoder is not None: logits = self.metric_space_decoder(logits) for logit, label in zip(logits, labels): counter.update(label) idx = encoder.label_dictionary.get_idx_for_item(label [0]) new_prototypes[idx] += logit store_embeddings(batch, storage_mode='none') for label, count in counter.most_common(): average_prototype = new_prototypes[encoder.label_dictionary .get_idx_for_item(label)] / count new_prototypes[encoder.label_dictionary.get_idx_for_item(label) ] = average_prototype for label in exempt_labels: label_idx = encoder.label_dictionary.get_idx_for_item(label) new_prototypes[label_idx] = self.prototype_vectors[label_idx] self.prototype_vectors.data = new_prototypes def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_prototypes': 4, 'embeddings_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import typing from torch import Tensor from collections import Counter from typing import List from typing import Optional from typing import Union from torch.utils.data import Dataset import torch.utils.data.dataloader from torch import nn import torch.nn from torch.utils.data.dataset import Dataset from torch.utils.data import DataLoader assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_neg_stack_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x0 = xindex % 4 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp10 = tmp9 - tmp6 tmp11 = tmp10 * tmp10 tmp12 = tmp8 + tmp11 tmp13 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp14 = tmp13 - tmp6 tmp15 = tmp14 * tmp14 tmp16 = tmp12 + tmp15 tmp17 = tl.load(in_ptr0 + (48 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp18 = tmp17 - tmp6 tmp19 = tmp18 * tmp18 tmp20 = tmp16 + tmp19 tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp4, tmp20, tmp21) tmp23 = tmp0 >= tmp3 tmp24 = tl.full([1], 8, tl.int64) tmp25 = tmp0 < tmp24 tmp26 = tmp23 & tmp25 tmp27 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask, other=0.0) tmp28 = tl.load(in_ptr1 + (4 + x0), tmp26 & xmask, eviction_policy= 'evict_last', other=0.0) tmp29 = tmp27 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask, other=0.0) tmp32 = tmp31 - tmp28 tmp33 = tmp32 * tmp32 tmp34 = tmp30 + tmp33 tmp35 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask, other=0.0) tmp36 = tmp35 - tmp28 tmp37 = tmp36 * tmp36 tmp38 = tmp34 + tmp37 tmp39 = tl.load(in_ptr0 + (48 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask, other=0.0) tmp40 = tmp39 - tmp28 tmp41 = tmp40 * tmp40 tmp42 = tmp38 + tmp41 tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp26, tmp42, tmp43) tmp45 = tmp0 >= tmp24 tmp46 = tl.full([1], 12, tl.int64) tmp47 = tmp0 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 64 * x2), tmp48 & xmask, other=0.0) tmp50 = tl.load(in_ptr1 + (8 + x0), tmp48 & xmask, eviction_policy= 'evict_last', other=0.0) tmp51 = tmp49 - tmp50 tmp52 = tmp51 * tmp51 tmp53 = tl.load(in_ptr0 + (16 + x0 + 4 * (-8 + x1) + 64 * x2), tmp48 & xmask, other=0.0) tmp54 = tmp53 - tmp50 tmp55 = tmp54 * tmp54 tmp56 = tmp52 + tmp55 tmp57 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp48 & xmask, other=0.0) tmp58 = tmp57 - tmp50 tmp59 = tmp58 * tmp58 tmp60 = tmp56 + tmp59 tmp61 = tl.load(in_ptr0 + (48 + x0 + 4 * (-8 + x1) + 64 * x2), tmp48 & xmask, other=0.0) tmp62 = tmp61 - tmp50 tmp63 = tmp62 * tmp62 tmp64 = tmp60 + tmp63 tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype) tmp66 = tl.where(tmp48, tmp64, tmp65) 
tmp67 = tmp0 >= tmp46 tl.full([1], 16, tl.int64) tmp70 = tl.load(in_ptr0 + (x0 + 4 * (-12 + x1) + 64 * x2), tmp67 & xmask, other=0.0) tmp71 = tl.load(in_ptr1 + (12 + x0), tmp67 & xmask, eviction_policy= 'evict_last', other=0.0) tmp72 = tmp70 - tmp71 tmp73 = tmp72 * tmp72 tmp74 = tl.load(in_ptr0 + (16 + x0 + 4 * (-12 + x1) + 64 * x2), tmp67 & xmask, other=0.0) tmp75 = tmp74 - tmp71 tmp76 = tmp75 * tmp75 tmp77 = tmp73 + tmp76 tmp78 = tl.load(in_ptr0 + (32 + x0 + 4 * (-12 + x1) + 64 * x2), tmp67 & xmask, other=0.0) tmp79 = tmp78 - tmp71 tmp80 = tmp79 * tmp79 tmp81 = tmp77 + tmp80 tmp82 = tl.load(in_ptr0 + (48 + x0 + 4 * (-12 + x1) + 64 * x2), tmp67 & xmask, other=0.0) tmp83 = tmp82 - tmp71 tmp84 = tmp83 * tmp83 tmp85 = tmp81 + tmp84 tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype) tmp87 = tl.where(tmp67, tmp85, tmp86) tmp88 = tl.where(tmp48, tmp66, tmp87) tmp89 = tl.where(tmp26, tmp44, tmp88) tmp90 = tl.where(tmp4, tmp22, tmp89) tmp91 = -tmp90 tl.store(in_out_ptr0 + x3, tmp91, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_neg_stack_0[grid(256)](buf1, primals_1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, primals_1, primals_2 def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) def arccosh(x): """Compute the arcosh, numerically stable.""" x = torch.clamp(x, min=1 + EPSILON) a = torch.log(x) b = torch.log1p(torch.sqrt(x * x - 1) / x) return a + b def _iter_dataset(dataset: 'Optional[Dataset]') ->typing.Iterable: if dataset is None: return [] return map(lambda x: x[0], DataLoader(dataset, batch_size=1, num_workers=0) ) def identify_dynamic_embeddings(data_point: 'DataPoint'): dynamic_embeddings = [] if isinstance(data_point, Sentence): first_token = data_point[0] for name, vector in first_token._embeddings.items(): if vector.requires_grad: dynamic_embeddings.append(name) for name, vector in data_point._embeddings.items(): if vector.requires_grad: dynamic_embeddings.append(name) return dynamic_embeddings def store_embeddings(data_points: 'Union[List[DT], Dataset]', storage_mode: 'str', dynamic_embeddings: 'Optional[List[str]]'=None): if isinstance(data_points, Dataset): data_points = list(_iter_dataset(data_points)) if storage_mode == 'none': dynamic_embeddings = None elif not dynamic_embeddings: dynamic_embeddings = identify_dynamic_embeddings(data_points[0]) for data_point in data_points: data_point.clear_embeddings(dynamic_embeddings) if storage_mode == 'cpu': str(flair.device) != 'cpu' for data_point in data_points: data_point def mdot(x, y): """Compute the inner product.""" m = x.new_ones(1, x.size(1)) m[0, 0] = -1 return torch.sum(m * x * y, 1, keepdim=True) def dist(x, y): """Get the hyperbolic distance between x and y.""" return arccosh(-mdot(x, y)) class CosineDistance(torch.nn.Module): def 
forward(self, a, b): return -dot_product(a, b, normalize=True) class EuclideanDistance(nn.Module): """Implement a EuclideanDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared euclidean distance between each element in mat_1 and each element in mat_2. Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ _dist = [torch.sum((mat_1 - mat_2[i]) ** 2, dim=1) for i in range( mat_2.size(0))] dist = torch.stack(_dist, dim=1) return dist class HyperbolicDistance(nn.Module): """Implement a HyperbolicDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared hyperbolic distance between each element in mat_1 and each element in mat_2. Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ mat_1_x_0 = torch.sqrt(1 + mat_1.pow(2).sum(dim=1, keepdim=True)) mat_2_x_0 = torch.sqrt(1 + mat_2.pow(2).sum(dim=1, keepdim=True)) left = mat_1_x_0.mm(mat_2_x_0.t()) right = mat_1[:, 1:].mm(mat_2[:, 1:].t()) return arccosh(left - right).pow(2) class LogitCosineDistance(torch.nn.Module): def forward(self, a, b): return torch.logit(0.5 - 0.5 * dot_product(a, b, normalize=True)) class NegativeScaledDotProduct(torch.nn.Module): def forward(self, a, b): sqrt_d = torch.sqrt(torch.tensor(a.size(-1))) return -dot_product(a, b, normalize=False) / sqrt_d class PrototypicalDecoderNew(torch.nn.Module): def __init__(self, num_prototypes: 'int', embeddings_size: 'int', prototype_size: 'Optional[int]'=None, distance_function: 'str'= 'euclidean', use_radius: 'Optional[bool]'=False, min_radius: 'Optional[int]'=0, unlabeled_distance: 'Optional[float]'=None, unlabeled_idx: 'Optional[int]'=None, learning_mode: 'Optional[str]' ='joint', normal_distributed_initial_prototypes: 'bool'=False): super().__init__() if not prototype_size: prototype_size = embeddings_size self.prototype_size = prototype_size self.metric_space_decoder: 'Optional[torch.nn.Linear]' = None if prototype_size != embeddings_size: self.metric_space_decoder = torch.nn.Linear(embeddings_size, prototype_size) torch.nn.init.xavier_uniform_(self.metric_space_decoder.weight) self.prototype_vectors = torch.nn.Parameter(torch.ones( num_prototypes, prototype_size), requires_grad=True) if normal_distributed_initial_prototypes: self.prototype_vectors = torch.nn.Parameter(torch.normal(torch. zeros(num_prototypes, prototype_size))) self.prototype_radii: 'Optional[torch.nn.Parameter]' = None if use_radius: self.prototype_radii = torch.nn.Parameter(torch.ones( num_prototypes), requires_grad=True) self.min_radius = min_radius self.learning_mode = learning_mode assert (unlabeled_idx is None) == (unlabeled_distance is None ), "'unlabeled_idx' and 'unlabeled_distance' should either both be set or both not be set."
self.unlabeled_idx = unlabeled_idx self.unlabeled_distance = unlabeled_distance self._distance_function = distance_function self.distance: 'Optional[torch.nn.Module]' = None if distance_function.lower() == 'hyperbolic': self.distance = HyperbolicDistance() elif distance_function.lower() == 'cosine': self.distance = CosineDistance() elif distance_function.lower() == 'logit_cosine': self.distance = LogitCosineDistance() elif distance_function.lower() == 'euclidean': self.distance = EuclideanDistance() elif distance_function.lower() == 'dot_product': self.distance = NegativeScaledDotProduct() else: raise KeyError(f'Distance function {distance_function} not found.') self @property def num_prototypes(self): return self.prototype_vectors.size(0) def enable_expectation_maximization(self, data: 'FlairDataset', encoder: 'DefaultClassifier', exempt_labels: 'List[str]'=[], mini_batch_size: 'int'=8): """Applies monkey-patch to train method (which sets the train flag). This allows for computation of average prototypes after a training sequence.""" decoder = self unpatched_train = encoder.train def patched_train(mode: 'bool'=True): unpatched_train(mode=mode) if mode: logger.info('recalculating prototypes') with torch.no_grad(): decoder.calculate_prototypes(data=data, encoder=encoder, exempt_labels=exempt_labels, mini_batch_size= mini_batch_size) encoder.train = patched_train def calculate_prototypes(self, data: 'FlairDataset', encoder: 'DefaultClassifier', exempt_labels: 'List[str]'=[], mini_batch_size=32 ): """ Function that calculates a prototype for each class based on the euclidean average embedding over the whole dataset :param data: dataset for which to calculate prototypes :param encoder: encoder to use :param exempt_labels: labels to exclude :param mini_batch_size: number of sentences to embed at the same time :return: """ with torch.no_grad(): dataloader = DataLoader(data, batch_size=mini_batch_size) new_prototypes = torch.zeros(self.num_prototypes, self. prototype_size, device=flair.device) counter: 'Counter' = Counter() for batch in tqdm(dataloader): logits, labels = encoder.forward_pass(batch) if len(labels) > 0: if self.metric_space_decoder is not None: logits = self.metric_space_decoder(logits) for logit, label in zip(logits, labels): counter.update(label) idx = encoder.label_dictionary.get_idx_for_item(label [0]) new_prototypes[idx] += logit store_embeddings(batch, storage_mode='none') for label, count in counter.most_common(): average_prototype = new_prototypes[encoder.label_dictionary .get_idx_for_item(label)] / count new_prototypes[encoder.label_dictionary.get_idx_for_item(label) ] = average_prototype for label in exempt_labels: label_idx = encoder.label_dictionary.get_idx_for_item(label) new_prototypes[label_idx] = self.prototype_vectors[label_idx] self.prototype_vectors.data = new_prototypes def forward(self, input_0): primals_2 = self.prototype_vectors primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
k2room/ParaphraseQA
PrototypicalDecoder
false
12,651
[ "MIT" ]
0
5aebe02c26a0bac3731f18bb115b33ba3a772756
https://github.com/k2room/ParaphraseQA/tree/5aebe02c26a0bac3731f18bb115b33ba3a772756
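With the default euclidean distance and `prototype_size == embeddings_size` (so no metric-space linear layer is created), the decoder's scores reduce to negated squared distances to the prototype rows, which is what the fused stack kernel computes. A sketch against the eager class:

import torch

decoder = PrototypicalDecoder(num_prototypes=4, embeddings_size=4)
emb = torch.rand(6, 4)
scores = decoder(emb)  # shape (6, 4): one score per (embedding, prototype) pair
ref = -torch.cdist(emb, decoder.prototype_vectors).pow(2)  # negated squared L2
assert torch.allclose(scores, ref, atol=1e-05)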
TwoLinearsModel
import torch import torch.nn as nn import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 class TwoLinearsModel(nn.Module): def __init__(self, per_sample_shape: 'list', hidden_size: 'int', output_size: 'int'): super(TwoLinearsModel, self).__init__() assert len(per_sample_shape) == 3 self.per_sample_shape = per_sample_shape input_size = per_sample_shape[0] for dim in per_sample_shape[1:]: input_size *= dim self.linear1 = nn.Linear(input_size, hidden_size) self.linear2 = nn.Linear(hidden_size, output_size) def forward(self, x: 'torch.Tensor'): batch_size = x.size(0) x = x.view(batch_size, -1) h_relu = self.linear1(x).clamp(min=0) y_pred = self.linear2(h_relu) return y_pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'per_sample_shape': [4, 4, 4], 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 64), (64, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (1, 64), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_0[grid(16)](buf0, primals_3, buf1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, primals_4, buf3 class TwoLinearsModelNew(nn.Module): def __init__(self, per_sample_shape: 'list', hidden_size: 'int', output_size: 'int'): super(TwoLinearsModelNew, self).__init__() assert len(per_sample_shape) == 3 self.per_sample_shape = per_sample_shape input_size = per_sample_shape[0] for dim in per_sample_shape[1:]: input_size *= dim self.linear1 = nn.Linear(input_size, hidden_size) self.linear2 = nn.Linear(hidden_size, output_size) def forward(self, input_0): primals_2 = self.linear1.weight primals_3 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
arjunsuresh/aimet
TwoLinearsModel
false
12,652
[ "BSD-3-Clause" ]
0
f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
https://github.com/arjunsuresh/aimet/tree/f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
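A minimal usage sketch for the TwoLinearsModel row above (not one of the dataset fields; constructor arguments and shapes are taken from the row's own get_init_inputs()/get_inputs(), and the class is assumed to be importable from the code shown):

import torch

# Instantiate with the arguments that get_init_inputs() reports.
model = TwoLinearsModel(per_sample_shape=[4, 4, 4], hidden_size=4,
    output_size=4)
x = torch.rand([4, 4, 4, 4])  # the shape that get_inputs() supplies
y = model(x)                  # each sample is flattened to 64 features
assert y.shape == (4, 4)      # one output_size-dim prediction per sample

The compiled TwoLinearsModelNew variant is a CUDA-only replacement: its call() routes the first matmul through extern_kernels.mm and fuses the bias add with the clamp(min=0) into a single Triton kernel.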
Net
import torch
import torch.nn as nn


class ConvBlock(nn.Module):

    def __init__(self, in_size, out_size, kernel=3, stride=1, padding=1,
        activ='relu', norm=None):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_size, out_size, kernel, stride, padding)
        self.norm = norm
        self.activ = activ
        if self.norm == 'batch':
            self.bn = nn.BatchNorm2d(out_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm2d(out_size)
        if self.activ == 'relu':
            self.act = nn.ReLU(True)
        elif self.activ == 'prelu':
            self.act = nn.PReLU()
        elif self.activ == 'lrelu':
            self.act = nn.LeakyReLU(0.2, True)
        elif self.activ == 'tanh':
            self.act = nn.Tanh()

    def forward(self, x):
        if self.norm is not None:
            x = self.bn(self.conv(x))
        else:
            x = self.conv(x)
        if self.activ is not None:
            return self.act(x)
        else:
            return x


class DeconvBlock(nn.Module):

    def __init__(self, in_size, out_size, kernel=4, stride=2, padding=1,
        activ='relu', norm=None):
        super(DeconvBlock, self).__init__()
        self.deconv = nn.ConvTranspose2d(in_size, out_size, kernel, stride,
            padding)
        self.norm = norm
        self.activ = activ
        if self.norm == 'batch':
            self.bn = nn.BatchNorm2d(out_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm2d(out_size)
        if self.activ == 'relu':
            self.act = nn.ReLU(True)
        elif self.activ == 'prelu':
            self.act = nn.PReLU()
        elif self.activ == 'lrelu':
            self.act = nn.LeakyReLU(0.2, True)
        elif self.activ == 'tanh':
            self.act = nn.Tanh()

    def forward(self, x):
        if self.norm is not None:
            x = self.bn(self.deconv(x))
        else:
            x = self.deconv(x)
        if self.activ is not None:
            return self.act(x)
        else:
            return x


class DownBlock(torch.nn.Module):

    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2,
        activation='prelu'):
        super(DownBlock, self).__init__()
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)
        self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)

    def forward(self, x):
        l0 = self.down_conv1(x)
        h0 = self.down_conv2(l0)
        l1 = self.down_conv3(h0 - x)
        return l1 + l0


class UpBlock(torch.nn.Module):

    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2,
        activation='prelu'):
        super(UpBlock, self).__init__()
        self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)
        self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)

    def forward(self, x):
        h0 = self.up_conv1(x)
        l0 = self.up_conv2(h0)
        h1 = self.up_conv3(l0 - x)
        return h1 + h0


class Net(nn.Module):

    def __init__(self, channels, filters, features, scale_fact):
        super(Net, self).__init__()
        self.lay1 = ConvBlock(in_size=channels, out_size=features,
            kernel=3, stride=1, padding=1, activ='prelu')
        self.lay2 = ConvBlock(in_size=features, out_size=filters,
            kernel=1, stride=1, padding=0, activ='prelu')
        self.down1 = DownBlock(num_filter=filters, kernel_size=6, stride=2,
            padding=2, activation='prelu')
        self.up1 = UpBlock(num_filter=filters, kernel_size=6, stride=2,
            padding=2, activation='prelu')
        self.down2 = DownBlock(num_filter=filters, kernel_size=6, stride=2,
            padding=2, activation='prelu')
        self.up2 = UpBlock(num_filter=filters, kernel_size=6, stride=2,
            padding=2, activation='prelu')
        self.out1 = DeconvBlock(in_size=filters, out_size=features,
            kernel=1, stride=1, padding=0, activ='prelu')
        self.out2 = DeconvBlock(in_size=features, out_size=channels,
            kernel=2, stride=2, padding=0, activ='prelu')

    def forward(self, x):
        x = self.lay1(x)
        x = self.lay2(x)
        x = self.down1(x)
        x = self.up1(x)
        x = self.down2(x)
        x = self.up2(x)
        x = self.out1(x)
        x = self.out2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'filters': 4, 'features': 4, 'scale_fact':
        1.0}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_poi_fused__prelu_kernel_convolution_sub_2(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp9 = tl.load(in_ptr2 + x3, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tmp10 = tmp8 - tmp9
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused__prelu_kernel_add_convolution_3(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp9 = tl.load(in_ptr2 + x3, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tmp10 = tmp8 + tmp9
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused__prelu_kernel_convolution_sub_4(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp9 = tl.load(in_ptr2 + x3, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tmp10 = tmp8 - tmp9
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused__prelu_kernel_add_convolution_5(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp9 = tl.load(in_ptr2 + x3, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tmp10 = tmp8 + tmp9
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused__prelu_kernel_convolution_6(in_out_ptr0, in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 64 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp7 = tmp6 * tmp2
    tmp8 = tl.where(tmp4, tmp2, tmp7)
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27, primals_28, primals_29, primals_30, primals_31,
        primals_32, primals_33, primals_34, primals_35, primals_36,
        primals_37, primals_38, primals_39, primals_40, primals_41,
        primals_42, primals_43, primals_44, primals_45, primals_46,
        primals_47, primals_48, primals_49) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1,), (1,))
    assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (1,), (1,))
    assert_size_stride(primals_8, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (1,), (1,))
    assert_size_stride(primals_11, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (1,), (1,))
    assert_size_stride(primals_14, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_15, (4,), (1,))
    assert_size_stride(primals_16, (1,), (1,))
    assert_size_stride(primals_17, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_18, (4,), (1,))
    assert_size_stride(primals_19, (1,), (1,))
    assert_size_stride(primals_20, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_21, (4,), (1,))
    assert_size_stride(primals_22, (1,), (1,))
    assert_size_stride(primals_23, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_24, (4,), (1,))
    assert_size_stride(primals_25, (1,), (1,))
    assert_size_stride(primals_26, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_27, (4,), (1,))
    assert_size_stride(primals_28, (1,), (1,))
    assert_size_stride(primals_29, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_30, (4,), (1,))
    assert_size_stride(primals_31, (1,), (1,))
    assert_size_stride(primals_32, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_33, (4,), (1,))
    assert_size_stride(primals_34, (1,), (1,))
    assert_size_stride(primals_35, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_36, (4,), (1,))
    assert_size_stride(primals_37, (1,), (1,))
    assert_size_stride(primals_38, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_39, (4,), (1,))
    assert_size_stride(primals_40, (1,), (1,))
    assert_size_stride(primals_41, (4, 4, 6, 6), (144, 36, 6, 1))
    assert_size_stride(primals_42, (4,), (1,))
    assert_size_stride(primals_43, (1,), (1,))
    assert_size_stride(primals_44, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_45, (4,), (1,))
    assert_size_stride(primals_46, (1,), (1,))
    assert_size_stride(primals_47, (4, 4, 2, 2), (16, 4, 2, 1))
    assert_size_stride(primals_48, (4,), (1,))
    assert_size_stride(primals_49, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1,
            primals_2, primals_4, buf2, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_2
        buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf3
        del buf3
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf4,
            primals_6, primals_7, buf5, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_6
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1))
        buf7 = buf6
        del buf6
        buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_1[grid(64)](buf7,
            primals_9, primals_10, buf8, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_9
        buf9 = extern_kernels.convolution(buf8, primals_11, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
        buf10 = buf9
        del buf9
        buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_sub_2[grid(256)](buf10,
            primals_12, primals_13, buf5, buf11, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_12
        buf12 = extern_kernels.convolution(buf11, primals_14, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 4, 2, 2), (16, 4, 2, 1))
        buf13 = buf12
        del buf12
        buf14 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused__prelu_kernel_add_convolution_3[grid(64)](buf13,
            primals_15, primals_16, buf8, buf14, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_15
        buf15 = extern_kernels.convolution(buf14, primals_17, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1))
        buf16 = buf15
        del buf15
        buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf16,
            primals_18, primals_19, buf17, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_18
        buf18 = extern_kernels.convolution(buf17, primals_20, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 4, 2, 2), (16, 4, 2, 1))
        buf19 = buf18
        del buf18
        buf20 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_sub_4[grid(64)](buf19,
            primals_21, primals_22, buf14, buf20, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_21
        buf21 = extern_kernels.convolution(buf20, primals_23, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1))
        buf22 = buf21
        del buf21
        buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__prelu_kernel_add_convolution_5[grid(256)](buf22,
            primals_24, primals_25, buf17, buf23, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_24
        buf24 = extern_kernels.convolution(buf23, primals_26, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 4, 2, 2), (16, 4, 2, 1))
        buf25 = buf24
        del buf24
        buf26 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_1[grid(64)](buf25,
            primals_27, primals_28, buf26, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_27
        buf27 = extern_kernels.convolution(buf26, primals_29, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1))
        buf28 = buf27
        del buf27
        buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_sub_2[grid(256)](buf28,
            primals_30, primals_31, buf23, buf29, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_30
        buf30 = extern_kernels.convolution(buf29, primals_32, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf30, (4, 4, 2, 2), (16, 4, 2, 1))
        buf31 = buf30
        del buf30
        buf32 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused__prelu_kernel_add_convolution_3[grid(64)](buf31,
            primals_33, primals_34, buf26, buf32, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_33
        buf33 = extern_kernels.convolution(buf32, primals_35, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf33, (4, 4, 4, 4), (64, 16, 4, 1))
        buf34 = buf33
        del buf33
        buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf34,
            primals_36, primals_37, buf35, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_36
        buf36 = extern_kernels.convolution(buf35, primals_38, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf36, (4, 4, 2, 2), (16, 4, 2, 1))
        buf37 = buf36
        del buf36
        buf38 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_sub_4[grid(64)](buf37,
            primals_39, primals_40, buf32, buf38, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_39
        buf39 = extern_kernels.convolution(buf38, primals_41, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf39, (4, 4, 4, 4), (64, 16, 4, 1))
        buf40 = buf39
        del buf39
        buf41 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__prelu_kernel_add_convolution_5[grid(256)](buf40,
            primals_42, primals_43, buf35, buf41, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_42
        buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf42, (4, 4, 4, 4), (64, 16, 4, 1))
        buf43 = buf42
        del buf42
        buf44 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf43,
            primals_45, primals_46, buf44, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_45
        buf45 = extern_kernels.convolution(buf44, primals_47, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf45, (4, 4, 8, 8), (256, 64, 8, 1))
        buf46 = buf45
        del buf45
        buf47 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32
            )
        triton_poi_fused__prelu_kernel_convolution_6[grid(1024)](buf46,
            primals_48, primals_49, buf47, 1024, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_48
    return (buf47, primals_1, primals_3, primals_4, primals_5, primals_7,
        primals_8, primals_10, primals_11, primals_13, primals_14,
        primals_16, primals_17, primals_19, primals_20, primals_22,
        primals_23, primals_25, primals_26, primals_28, primals_29,
        primals_31, primals_32, primals_34, primals_35, primals_37,
        primals_38, primals_40, primals_41, primals_43, primals_44,
        primals_46, primals_47, primals_49, buf1, buf2, buf4, buf5, buf7,
        buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20,
        buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34,
        buf35, buf37, buf38, buf40, buf41, buf43, buf44, buf46)


class ConvBlock(nn.Module):

    def __init__(self, in_size, out_size, kernel=3, stride=1, padding=1,
        activ='relu', norm=None):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_size, out_size, kernel, stride, padding)
        self.norm = norm
        self.activ = activ
        if self.norm == 'batch':
            self.bn = nn.BatchNorm2d(out_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm2d(out_size)
        if self.activ == 'relu':
            self.act = nn.ReLU(True)
        elif self.activ == 'prelu':
            self.act = nn.PReLU()
        elif self.activ == 'lrelu':
            self.act = nn.LeakyReLU(0.2, True)
        elif self.activ == 'tanh':
            self.act = nn.Tanh()

    def forward(self, x):
        if self.norm is not None:
            x = self.bn(self.conv(x))
        else:
            x = self.conv(x)
        if self.activ is not None:
            return self.act(x)
        else:
            return x


class DeconvBlock(nn.Module):

    def __init__(self, in_size, out_size, kernel=4, stride=2, padding=1,
        activ='relu', norm=None):
        super(DeconvBlock, self).__init__()
        self.deconv = nn.ConvTranspose2d(in_size, out_size, kernel, stride,
            padding)
        self.norm = norm
        self.activ = activ
        if self.norm == 'batch':
            self.bn = nn.BatchNorm2d(out_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm2d(out_size)
        if self.activ == 'relu':
            self.act = nn.ReLU(True)
        elif self.activ == 'prelu':
            self.act = nn.PReLU()
        elif self.activ == 'lrelu':
            self.act = nn.LeakyReLU(0.2, True)
        elif self.activ == 'tanh':
            self.act = nn.Tanh()

    def forward(self, x):
        if self.norm is not None:
            x = self.bn(self.deconv(x))
        else:
            x = self.deconv(x)
        if self.activ is not None:
            return self.act(x)
        else:
            return x


class DownBlock(torch.nn.Module):

    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2,
        activation='prelu'):
        super(DownBlock, self).__init__()
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)
        self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)

    def forward(self, x):
        l0 = self.down_conv1(x)
        h0 = self.down_conv2(l0)
        l1 = self.down_conv3(h0 - x)
        return l1 + l0


class UpBlock(torch.nn.Module):

    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2,
        activation='prelu'):
        super(UpBlock, self).__init__()
        self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)
        self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size,
            stride, padding, activation)

    def forward(self, x):
        h0 = self.up_conv1(x)
        l0 = self.up_conv2(h0)
        h1 = self.up_conv3(l0 - x)
        return h1 + h0


class NetNew(nn.Module):

    def __init__(self, channels, filters, features, scale_fact):
        super(NetNew, self).__init__()
        self.lay1 = ConvBlock(in_size=channels, out_size=features,
            kernel=3, stride=1, padding=1, activ='prelu')
        self.lay2 = ConvBlock(in_size=features, out_size=filters,
            kernel=1, stride=1, padding=0, activ='prelu')
        self.down1 = DownBlock(num_filter=filters, kernel_size=6, stride=2,
            padding=2, activation='prelu')
        self.up1 = UpBlock(num_filter=filters, kernel_size=6, stride=2,
            padding=2, activation='prelu')
        self.down2 = DownBlock(num_filter=filters, kernel_size=6, stride=2,
            padding=2, activation='prelu')
        self.up2 = UpBlock(num_filter=filters, kernel_size=6, stride=2,
            padding=2, activation='prelu')
        self.out1 = DeconvBlock(in_size=filters, out_size=features,
            kernel=1, stride=1, padding=0, activ='prelu')
        self.out2 = DeconvBlock(in_size=features, out_size=channels,
            kernel=2, stride=2, padding=0, activ='prelu')

    def forward(self, input_0):
        primals_1 = self.lay1.conv.weight
        primals_2 = self.lay1.conv.bias
        primals_4 = self.lay1.act.weight
        primals_5 = self.lay2.conv.weight
        primals_6 = self.lay2.conv.bias
        primals_7 = self.lay2.act.weight
        primals_8 = self.down1.down_conv1.conv.weight
        primals_9 = self.down1.down_conv1.conv.bias
        primals_10 = self.down1.down_conv1.act.weight
        primals_11 = self.down1.down_conv2.deconv.weight
        primals_12 = self.down1.down_conv2.deconv.bias
        primals_13 = self.down1.down_conv2.act.weight
        primals_14 = self.down1.down_conv3.conv.weight
        primals_15 = self.down1.down_conv3.conv.bias
        primals_16 = self.down1.down_conv3.act.weight
        primals_17 = self.up1.up_conv1.deconv.weight
        primals_18 = self.up1.up_conv1.deconv.bias
        primals_19 = self.up1.up_conv1.act.weight
        primals_20 = self.up1.up_conv2.conv.weight
        primals_21 = self.up1.up_conv2.conv.bias
        primals_22 = self.up1.up_conv2.act.weight
        primals_23 = self.up1.up_conv3.deconv.weight
        primals_24 = self.up1.up_conv3.deconv.bias
        primals_25 = self.up1.up_conv3.act.weight
        primals_26 = self.down2.down_conv1.conv.weight
        primals_27 = self.down2.down_conv1.conv.bias
        primals_28 = self.down2.down_conv1.act.weight
        primals_29 = self.down2.down_conv2.deconv.weight
        primals_30 = self.down2.down_conv2.deconv.bias
        primals_31 = self.down2.down_conv2.act.weight
        primals_32 = self.down2.down_conv3.conv.weight
        primals_33 = self.down2.down_conv3.conv.bias
        primals_34 = self.down2.down_conv3.act.weight
        primals_35 = self.up2.up_conv1.deconv.weight
        primals_36 = self.up2.up_conv1.deconv.bias
        primals_37 = self.up2.up_conv1.act.weight
        primals_38 = self.up2.up_conv2.conv.weight
        primals_39 = self.up2.up_conv2.conv.bias
        primals_40 = self.up2.up_conv2.act.weight
        primals_41 = self.up2.up_conv3.deconv.weight
        primals_42 = self.up2.up_conv3.deconv.bias
        primals_43 = self.up2.up_conv3.act.weight
        primals_44 = self.out1.deconv.weight
        primals_45 = self.out1.deconv.bias
        primals_46 = self.out1.act.weight
        primals_47 = self.out2.deconv.weight
        primals_48 = self.out2.deconv.bias
        primals_49 = self.out2.act.weight
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40, primals_41, primals_42, primals_43, primals_44,
            primals_45, primals_46, primals_47, primals_48, primals_49])
        return output[0]
jth1011/ECE539-Project
Net
false
12,653
[ "MIT" ]
0
bce6ffd75da92e862d8fda3852be247602b1567e
https://github.com/jth1011/ECE539-Project/tree/bce6ffd75da92e862d8fda3852be247602b1567e
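A hedged usage sketch for the Net row above (arguments from get_init_inputs(); the expected output shape follows from the compiled call(), whose final buffer is (4, 4, 8, 8)):

import torch

model = Net(channels=4, filters=4, features=4, scale_fact=1.0)
x = torch.rand([4, 4, 4, 4])
y = model(x)
# The final DeconvBlock (kernel=2, stride=2) doubles height and width.
# Note that scale_fact is accepted but never used in this definition.
assert y.shape == (4, 4, 8, 8)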
ColorJitterLayer
from torch.autograd import Function
import math
import numbers
import torch
import numpy as np
import torch.nn as nn
import torch.utils.cpp_extension


def hsv2rgb(hsv):
    """Convert a 4-d HSV tensor to the RGB counterpart.

    >>> %timeit hsv2rgb_lookup(hsv)
    2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
    >>> %timeit hsv2rgb(rgb)
    298 µs ± 542 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6)
    True

    References
    [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative
    """
    h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]]
    c = v * s
    n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1)
    k = (n + h * 6) % 6
    t = torch.min(k, 4.0 - k)
    t = torch.clamp(t, 0, 1)
    return v - c * t


def rgb2hsv(rgb):
    """Convert a 4-d RGB tensor to the HSV counterpart.

    Here, we compute hue using atan2() based on the definition in [1],
    instead of using the common lookup table approach as in [2, 3].
    Those values agree when the angle is a multiple of 30°, otherwise they
    may differ at most ~1.2°.

    >>> %timeit rgb2hsv_lookup(rgb)
    1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    >>> %timeit rgb2hsv(rgb)
    380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    >>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max()
    tensor(0.0031, device='cuda:0')

    References
    [1] https://en.wikipedia.org/wiki/Hue
    [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html
    [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212
    """
    r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :]
    Cmax = rgb.max(1)[0]
    Cmin = rgb.min(1)[0]
    hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b)
    hue = hue % (2 * math.pi) / (2 * math.pi)
    saturate = 1 - Cmin / (Cmax + 1e-08)
    value = Cmax
    hsv = torch.stack([hue, saturate, value], dim=1)
    hsv[~torch.isfinite(hsv)] = 0.0
    return hsv


class RandomHSVFunction(Function):

    @staticmethod
    def forward(ctx, x, f_h, f_s, f_v):
        x = rgb2hsv(x)
        h = x[:, 0, :, :]
        h += f_h * 255.0 / 360.0
        h = h % 1
        x[:, 0, :, :] = h
        x[:, 1, :, :] = x[:, 1, :, :] * f_s
        x[:, 2, :, :] = x[:, 2, :, :] * f_v
        x = torch.clamp(x, 0, 1)
        x = hsv2rgb(x)
        return x

    @staticmethod
    def backward(ctx, grad_output):
        grad_input = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.clone()
        return grad_input, None, None, None


class ColorJitterLayer(nn.Module):

    def __init__(self, brightness, contrast, saturation, hue):
        super(ColorJitterLayer, self).__init__()
        self.brightness = self._check_input(brightness, 'brightness')
        self.contrast = self._check_input(contrast, 'contrast')
        self.saturation = self._check_input(saturation, 'saturation')
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5,
            0.5), clip_first_on_zero=False)

    def _check_input(self, value, name, center=1, bound=(0, float('inf')),
        clip_first_on_zero=True):
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError(
                    'If {} is a single number, it must be non negative.'.
                    format(name))
            value = [center - value, center + value]
            if clip_first_on_zero:
                value[0] = max(value[0], 0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError('{} values should be between {}'.format(
                    name, bound))
        else:
            raise TypeError(
                '{} should be a single number or a list/tuple with length 2.'
                .format(name))
        if value[0] == value[1] == center:
            value = None
        return value

    def adjust_contrast(self, x):
        if self.contrast:
            factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast)
            means = torch.mean(x, dim=[2, 3], keepdim=True)
            x = (x - means) * factor + means
        return torch.clamp(x, 0, 1)

    def adjust_hsv(self, x):
        f_h = x.new_zeros(x.size(0), 1, 1)
        f_s = x.new_ones(x.size(0), 1, 1)
        f_v = x.new_ones(x.size(0), 1, 1)
        if self.hue:
            f_h.uniform_(*self.hue)
        if self.saturation:
            f_s = f_s.uniform_(*self.saturation)
        if self.brightness:
            f_v = f_v.uniform_(*self.brightness)
        return RandomHSVFunction.apply(x, f_h, f_s, f_v)

    def transform(self, inputs):
        if np.random.rand() > 0.5:
            transforms = [self.adjust_contrast, self.adjust_hsv]
        else:
            transforms = [self.adjust_hsv, self.adjust_contrast]
        for t in transforms:
            inputs = t(inputs)
        return inputs

    def forward(self, inputs):
        return self.transform(inputs)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'brightness': 4, 'contrast': 4, 'saturation': 4, 'hue': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.autograd import Function
import math
import numbers
import numpy as np
import torch.nn as nn
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 12
    x0 = xindex % 4
    x2 = xindex // 48
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask,
        other=0.0)
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask,
        other=0.0)
    tmp7 = tmp5 - tmp6
    tmp8 = 1.7320508075688772
    tmp9 = tmp7 * tmp8
    tmp10 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp11 = 2.0
    tmp12 = tmp10 * tmp11
    tmp13 = tmp12 - tmp5
    tmp14 = tmp13 - tmp6
    tmp15 = libdevice.atan2(tmp9, tmp14)
    tmp16 = 6.283185307179586
    tmp17 = tmp15 % tmp16
    tmp18 = tl.full([1], 0, tl.int32)
    tmp19 = tmp17 != tmp18
    tmp20 = libdevice.signbit(tmp17) if tmp17.dtype is tl.float32 else tmp17 < 0
    tmp21 = libdevice.signbit(tmp16) if tmp16.dtype is tl.float32 else tmp16 < 0
    tmp22 = tmp20 != tmp21
    tmp23 = tmp19 & tmp22
    tmp24 = tmp17 + tmp16
    tmp25 = tl.where(tmp23, tmp24, tmp17)
    tmp26 = 0.15915494309189535
    tmp27 = tmp25 * tmp26
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp4, tmp27, tmp28)
    tmp30 = tmp0 >= tmp3
    tmp31 = tl.full([1], 8, tl.int64)
    tmp32 = tmp0 < tmp31
    tmp33 = tmp30 & tmp32
    tmp34 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp33 &
        xmask, other=0.0)
    tmp35 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 &
        xmask, other=0.0)
    tmp36 = triton_helpers.minimum(tmp34, tmp35)
    tmp37 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 &
        xmask, other=0.0)
    tmp38 = triton_helpers.minimum(tmp36, tmp37)
    tmp39 = tl.load(in_ptr0 + (48 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 &
        xmask, other=0.0)
    tmp40 = triton_helpers.minimum(tmp38, tmp39)
    tmp41 = triton_helpers.maximum(tmp34, tmp35)
    tmp42 = triton_helpers.maximum(tmp41, tmp37)
    tmp43 = triton_helpers.maximum(tmp42, tmp39)
    tmp44 = 1e-08
    tmp45 = tmp43 + tmp44
    tmp46 = tmp40 / tmp45
    tmp47 = 1.0
    tmp48 = tmp47 - tmp46
    tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
    tmp50 = tl.where(tmp33, tmp48, tmp49)
    tmp51 = tmp0 >= tmp31
    tl.full([1], 12, tl.int64)
    tmp54 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 64 * x2), tmp51 &
        xmask, other=0.0)
    tmp55 = tl.load(in_ptr0 + (16 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 &
        xmask, other=0.0)
    tmp56 = triton_helpers.maximum(tmp54, tmp55)
    tmp57 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 &
        xmask, other=0.0)
    tmp58 = triton_helpers.maximum(tmp56, tmp57)
    tmp59 = tl.load(in_ptr0 + (48 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 &
        xmask, other=0.0)
    tmp60 = triton_helpers.maximum(tmp58, tmp59)
    tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
    tmp62 = tl.where(tmp51, tmp60, tmp61)
    tmp63 = tl.where(tmp33, tmp50, tmp62)
    tmp64 = tl.where(tmp4, tmp29, tmp63)
    tl.store(out_ptr0 + x3, tmp64, xmask)


@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 == tmp0
    tmp2 = tl_math.abs(tmp0)
    tmp3 = float('inf')
    tmp4 = tmp2 != tmp3
    tmp5 = tmp1 & tmp4
    tmp6 = tmp5 == 0
    tmp7 = 0.0
    tmp8 = tl.where(tmp6, tmp7, tmp0)
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_new_ones_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 1.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_copy_div_mul_remainder_4(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 3
    x0 = xindex % 16
    x2 = xindex // 48
    x3 = xindex
    tmp6 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + x3, xmask)
    tmp0 = x1
    tmp1 = tl.full([1], 1, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = tmp1 == tmp3
    tmp5 = tmp3 == tmp3
    tmp8 = 255.0
    tmp9 = tmp7 * tmp8
    tmp10 = 0.002777777777777778
    tmp11 = tmp9 * tmp10
    tmp12 = tmp6 + tmp11
    tmp13 = tl.where(tmp5, tmp12, tmp6)
    tmp14 = 1.0
    tmp15 = tmp13 % tmp14
    tmp16 = tmp15 != tmp3
    tmp17 = libdevice.signbit(tmp15) if tmp15.dtype is tl.float32 else tmp15 < 0
    tmp18 = libdevice.signbit(tmp14) if tmp14.dtype is tl.float32 else tmp14 < 0
    tmp19 = tmp17 != tmp18
    tmp20 = tmp16 & tmp19
    tmp21 = tmp15 + tmp14
    tmp22 = tl.where(tmp20, tmp21, tmp15)
    tmp24 = tl.where(tmp4, tmp12, tmp23)
    tmp25 = tl.where(tmp4, tmp22, tmp24)
    tmp27 = tmp25 * tmp26
    tmp28 = tmp0 == tmp3
    tmp30 = tl.where(tmp28, tmp12, tmp29)
    tmp31 = tl.where(tmp28, tmp22, tmp30)
    tmp32 = tl.where(tmp2, tmp27, tmp31)
    tl.store(out_ptr0 + x3, tmp32, xmask)


@triton.jit
def triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5(
    in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 12
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    x0 = xindex % 3
    r2 = rindex
    x1 = xindex // 3
    x3 = xindex
    tmp13 = tl.load(in_ptr0 + (32 + r2 + 48 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (r2 + 48 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp42 = tl.load(in_ptr0 + (16 + r2 + 48 * x1), xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp57 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp0 = x0
    tmp1 = tl.full([1, 1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1, 1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = 3.0
    tmp6 = 1.0
    tmp7 = tl.where(tmp4, tmp5, tmp6)
    tmp8 = 5.0
    tmp9 = tl.where(tmp2, tmp8, tmp7)
    tmp10 = tl.full([1, 1], 0, tl.int32)
    tmp11 = tl.full([1, 1], 2, tl.int32)
    tmp12 = tmp10 == tmp11
    tmp15 = tmp13 * tmp14
    tmp17 = tl.where(tmp12, tmp15, tmp16)
    tmp18 = 0.0
    tmp19 = triton_helpers.maximum(tmp17, tmp18)
    tmp20 = triton_helpers.minimum(tmp19, tmp6)
    tmp21 = 6.0
    tmp22 = tmp20 * tmp21
    tmp23 = tmp9 + tmp22
    tmp24 = tmp23 % tmp21
    tmp25 = tmp24 != tmp10
    tmp26 = libdevice.signbit(tmp24) if tmp24.dtype is tl.float32 else tmp24 < 0
    tmp27 = libdevice.signbit(tmp21) if tmp21.dtype is tl.float32 else tmp21 < 0
    tmp28 = tmp26 != tmp27
    tmp29 = tmp25 & tmp28
    tmp30 = tmp24 + tmp21
    tmp31 = tl.where(tmp29, tmp30, tmp24)
    tmp32 = 4.0
    tmp33 = tmp32 - tmp31
    tmp34 = triton_helpers.minimum(tmp31, tmp33)
    tmp35 = triton_helpers.maximum(tmp34, tmp18)
    tmp36 = tmp11 == tmp11
    tmp37 = tl.where(tmp36, tmp15, tmp13)
    tmp38 = triton_helpers.maximum(tmp37, tmp18)
    tmp39 = triton_helpers.minimum(tmp38, tmp6)
    tmp40 = tl.full([1, 1], 1, tl.int32)
    tmp41 = tmp40 == tmp11
    tmp43 = tl.where(tmp41, tmp15, tmp42)
    tmp44 = triton_helpers.maximum(tmp43, tmp18)
    tmp45 = triton_helpers.minimum(tmp44, tmp6)
    tmp46 = tmp39 * tmp45
    tmp47 = triton_helpers.minimum(tmp35, tmp6)
    tmp48 = tmp46 * tmp47
    tmp49 = tmp39 - tmp48
    tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
    tmp52 = tl.where(xmask, tmp50, 0)
    tmp53 = tl.sum(tmp52, 1)[:, None]
    tmp54 = 16.0
    tmp55 = tmp53 / tmp54
    tmp56 = tmp49 - tmp55
    tmp58 = tmp56 * tmp57
    tmp59 = tmp58 + tmp55
    tmp60 = triton_helpers.maximum(tmp59, tmp18)
    tmp61 = triton_helpers.minimum(tmp60, tmp6)
    tl.store(out_ptr1 + (r2 + 16 * x3), tmp61, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 12, 4), (48, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_stack_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
        triton_poi_fused_index_put_lift_fresh_1[grid(192)](buf0, buf0, 192,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        triton_poi_fused_new_zeros_2[grid(4)](buf7, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        buf8 = torch.ops.aten.uniform.default(buf7, -4.0, 4.0)
        buf9 = buf8
        del buf8
        buf10 = buf7
        del buf7
        triton_poi_fused_new_ones_3[grid(4)](buf10, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        buf11 = torch.ops.aten.uniform.default(buf10, 0.0, 5.0)
        del buf10
        buf12 = buf11
        del buf11
        buf13 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
        triton_poi_fused_add_copy_div_mul_remainder_4[grid(192)](buf0,
            buf9, buf12, buf13, 192, XBLOCK=256, num_warps=4, num_stages=1)
        del buf12
        buf14 = buf9
        del buf9
        triton_poi_fused_new_ones_3[grid(4)](buf14, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        buf15 = torch.ops.aten.uniform.default(buf14, 0.0, 5.0)
        buf16 = buf15
        del buf15
        buf20 = reinterpret_tensor(buf14, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf14
        buf21 = torch.ops.aten.uniform.default(buf20, 0.0, 5.0)
        del buf20
        buf22 = buf21
        del buf21
        buf23 = reinterpret_tensor(buf0, (4, 3, 4, 4), (48, 16, 4, 1), 0)
        del buf0
        triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5[
            grid(12)](buf13, buf16, buf22, buf23, 12, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf13
        del buf16
        del buf22
    return buf23,


def hsv2rgb(hsv):
    """Convert a 4-d HSV tensor to the RGB counterpart.

    >>> %timeit hsv2rgb_lookup(hsv)
    2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
    >>> %timeit hsv2rgb(rgb)
    298 µs ± 542 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6)
    True

    References
    [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative
    """
    h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]]
    c = v * s
    n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1)
    k = (n + h * 6) % 6
    t = torch.min(k, 4.0 - k)
    t = torch.clamp(t, 0, 1)
    return v - c * t


def rgb2hsv(rgb):
    """Convert a 4-d RGB tensor to the HSV counterpart.

    Here, we compute hue using atan2() based on the definition in [1],
    instead of using the common lookup table approach as in [2, 3].
    Those values agree when the angle is a multiple of 30°, otherwise they
    may differ at most ~1.2°.

    >>> %timeit rgb2hsv_lookup(rgb)
    1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    >>> %timeit rgb2hsv(rgb)
    380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    >>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max()
    tensor(0.0031, device='cuda:0')

    References
    [1] https://en.wikipedia.org/wiki/Hue
    [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html
    [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212
    """
    r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :]
    Cmax = rgb.max(1)[0]
    Cmin = rgb.min(1)[0]
    hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b)
    hue = hue % (2 * math.pi) / (2 * math.pi)
    saturate = 1 - Cmin / (Cmax + 1e-08)
    value = Cmax
    hsv = torch.stack([hue, saturate, value], dim=1)
    hsv[~torch.isfinite(hsv)] = 0.0
    return hsv


class RandomHSVFunction(Function):

    @staticmethod
    def forward(ctx, x, f_h, f_s, f_v):
        x = rgb2hsv(x)
        h = x[:, 0, :, :]
        h += f_h * 255.0 / 360.0
        h = h % 1
        x[:, 0, :, :] = h
        x[:, 1, :, :] = x[:, 1, :, :] * f_s
        x[:, 2, :, :] = x[:, 2, :, :] * f_v
        x = torch.clamp(x, 0, 1)
        x = hsv2rgb(x)
        return x

    @staticmethod
    def backward(ctx, grad_output):
        grad_input = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.clone()
        return grad_input, None, None, None


class ColorJitterLayerNew(nn.Module):

    def __init__(self, brightness, contrast, saturation, hue):
        super(ColorJitterLayerNew, self).__init__()
        self.brightness = self._check_input(brightness, 'brightness')
        self.contrast = self._check_input(contrast, 'contrast')
        self.saturation = self._check_input(saturation, 'saturation')
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5,
            0.5), clip_first_on_zero=False)

    def _check_input(self, value, name, center=1, bound=(0, float('inf')),
        clip_first_on_zero=True):
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError(
                    'If {} is a single number, it must be non negative.'.
                    format(name))
            value = [center - value, center + value]
            if clip_first_on_zero:
                value[0] = max(value[0], 0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError('{} values should be between {}'.format(
                    name, bound))
        else:
            raise TypeError(
                '{} should be a single number or a list/tuple with length 2.'
                .format(name))
        if value[0] == value[1] == center:
            value = None
        return value

    def adjust_contrast(self, x):
        if self.contrast:
            factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast)
            means = torch.mean(x, dim=[2, 3], keepdim=True)
            x = (x - means) * factor + means
        return torch.clamp(x, 0, 1)

    def adjust_hsv(self, x):
        f_h = x.new_zeros(x.size(0), 1, 1)
        f_s = x.new_ones(x.size(0), 1, 1)
        f_v = x.new_ones(x.size(0), 1, 1)
        if self.hue:
            f_h.uniform_(*self.hue)
        if self.saturation:
            f_s = f_s.uniform_(*self.saturation)
        if self.brightness:
            f_v = f_v.uniform_(*self.brightness)
        return RandomHSVFunction.apply(x, f_h, f_s, f_v)

    def transform(self, inputs):
        if np.random.rand() > 0.5:
            transforms = [self.adjust_contrast, self.adjust_hsv]
        else:
            transforms = [self.adjust_hsv, self.adjust_contrast]
        for t in transforms:
            inputs = t(inputs)
        return inputs

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
hugobloem/PyTorch-StudioGAN
ColorJitterLayer
false
12,654
[ "MIT" ]
0
3deab27c0774adba5a94c7f452d32d4cbc3b117c
https://github.com/hugobloem/PyTorch-StudioGAN/tree/3deab27c0774adba5a94c7f452d32d4cbc3b117c
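A hedged usage sketch for the ColorJitterLayer row above. One caveat visible in the code: get_inputs() supplies a 4-channel tensor, but rgb2hsv() reads only channels 0-2 as R, G, B and stacks exactly three HSV channels, so the compiled call() returns a (4, 3, 4, 4) buffer. The sketch therefore feeds a 3-channel image; the layer is also stochastic (random transform order and random HSV factors), so outputs differ between calls:

import torch

layer = ColorJitterLayer(brightness=4, contrast=4, saturation=4, hue=4)
x = torch.rand([4, 3, 4, 4])  # N x RGB x H x W, values in [0, 1]
y = layer(x)
assert y.shape == x.shape
# Both transform orders end with values clamped into [0, 1].
assert 0.0 <= y.min().item() and y.max().item() <= 1.0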
DiceLoss
import torch
import torch.nn as nn


class DiceLoss(nn.Module):

    def __init__(self, smooth=0, eps=1e-07):
        super(DiceLoss, self).__init__()
        self.smooth = smooth
        self.eps = eps

    def forward(self, output, target):
        return 1 - (2 * torch.sum(output * target) + self.smooth) / (torch
            .sum(output) + torch.sum(target) + self.smooth + self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
    tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
    tmp12 = 2.0
    tmp13 = tmp5 * tmp12
    tmp14 = 0.0
    tmp15 = tmp13 + tmp14
    tmp16 = tmp8 + tmp11
    tmp17 = tmp16 + tmp14
    tmp18 = 1e-07
    tmp19 = tmp17 + tmp18
    tmp20 = tmp15 / tmp19
    tmp21 = 1.0
    tmp22 = tmp21 - tmp20
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf3,


class DiceLossNew(nn.Module):

    def __init__(self, smooth=0, eps=1e-07):
        super(DiceLossNew, self).__init__()
        self.smooth = smooth
        self.eps = eps

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
kant/open-solution-ship-detection
DiceLoss
false
12,655
[ "MIT" ]
0
94fa14fc461d6088d884930cbd8e2a2b99a338b5
https://github.com/kant/open-solution-ship-detection/tree/94fa14fc461d6088d884930cbd8e2a2b99a338b5
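A hedged usage sketch for the DiceLoss row above; the module reduces over all elements and returns a scalar:

import torch

criterion = DiceLoss(smooth=0, eps=1e-07)
output = torch.rand([4, 4, 4, 4])
target = torch.rand([4, 4, 4, 4])
loss = criterion(output, target)
# For inputs in [0, 1], 2*sum(o*t) <= sum(o) + sum(t), so the loss
# lands in [0, 1]; identical tensors drive it toward 0.
assert loss.dim() == 0 and 0.0 <= loss.item() <= 1.0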
NetVLAD
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.nn.functional as F


class NetVLAD(nn.Module):
    """NetVLAD layer implementation"""

    def __init__(self, num_clusters=16, dim=2048, alpha=30.0,
        normalize_input=True):
        """
        Args:
            num_clusters : int
                The number of clusters
            dim : int
                Dimension of descriptors
            alpha : float
                Parameter of initialization. Larger value is harder assignment.
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to input.
        """
        super(NetVLAD, self).__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = alpha
        self.normalize_input = normalize_input
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
        self._init_params()

    def _init_params(self):
        self.conv.weight = nn.Parameter((2.0 * self.alpha * self.centroids
            ).unsqueeze(-1).unsqueeze(-1))
        self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1))

    def forward(self, x):
        N, C = x.shape[:2]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)
        soft_assign = self.conv(x).view(N, self.num_clusters, -1)
        soft_assign = F.softmax(soft_assign, dim=1)
        x_flatten = x.view(N, C, -1)
        residual = x_flatten.expand(self.num_clusters, -1, -1, -1).permute(
            1, 0, 2, 3) - self.centroids.expand(x_flatten.size(-1), -1, -1
            ).permute(1, 2, 0).unsqueeze(0)
        residual *= soft_assign.unsqueeze(2)
        vlad = residual.sum(dim=-1)
        vlad = F.normalize(vlad, p=2, dim=2)
        vlad = vlad.view(x.size(0), -1)
        vlad = F.normalize(vlad, p=2, dim=1)
        return vlad


def get_inputs():
    return [torch.rand([4, 2048, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel,
    rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 4096
    x1 = xindex // 4096
    _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    x3 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 8388608 * x1), rmask,
            eviction_policy='evict_last', other=0.0)
        tmp1 = tmp0 * tmp0
        tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
        tmp4 = _tmp3 + tmp2
        _tmp3 = tl.where(rmask, tmp4, _tmp3)
    tmp3 = tl.sum(_tmp3, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp3, None)


@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y1 = yindex // 2048
    y0 = yindex % 2048
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y1), None, eviction_policy=
        'evict_last')
    tmp2 = libdevice.sqrt(tmp1)
    tmp3 = 1e-12
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tmp0 / tmp4
    tl.store(out_ptr0 + (y0 + 2048 * x2 + 8388608 * y1), tmp5, None)


@triton.jit
def triton_per_fused__softmax_convolution_2(in_out_ptr0, in_ptr0, out_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (r1 + 16 * x0), None)
    tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp6 = tmp2 - tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
    tmp10 = tl.sum(tmp8, 1)[:, None]
    tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp2, None)
    tl.store(out_ptr0 + x0, tmp5, None)
    tl.store(out_ptr1 + x0, tmp10, None)


@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
    constexpr):
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 2048
    x2 = xindex // 32768
    x4 = xindex % 32768
    tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
    x1 = xindex // 2048 % 16
    _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    x5 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r3 = rindex
        tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r3 + 8388608 * x2), rmask,
            eviction_policy='evict_last', other=0.0)
        tmp3 = tl.load(in_ptr2 + (x1 + 16 * r3 + 65536 * x2), rmask,
            eviction_policy='evict_last', other=0.0)
        tmp4 = tl.load(in_ptr3 + (r3 + 4096 * x2), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp7 = tl.load(in_ptr4 + (r3 + 4096 * x2), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp2 = tmp0 - tmp1
        tmp5 = tmp3 - tmp4
        tmp6 = tl_math.exp(tmp5)
        tmp8 = tmp6 / tmp7
        tmp9 = tmp2 * tmp8
        tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
        tmp12 = _tmp11 + tmp10
        _tmp11 = tl.where(rmask, tmp12, _tmp11)
    tmp11 = tl.sum(_tmp11, 1)[:, None]
    tl.store(out_ptr0 + x5, tmp11, None)


@triton.jit
def triton_red_fused_linalg_vector_norm_4(in_out_ptr0, in_ptr0, xnumel,
    rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 64
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
            eviction_policy='evict_first', other=0.0)
        tmp1 = tmp0 * tmp0
        tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
        tmp4 = _tmp3 + tmp2
        _tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
    tmp3 = tl.sum(_tmp3, 1)[:, None]
    tmp5 = libdevice.sqrt(tmp3)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp5, xmask)


@triton.jit
def triton_red_fused_linalg_vector_norm_5(in_ptr0, in_ptr1, out_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 16
    rnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
            eviction_policy='evict_last', other=0.0)
        tmp1 = tl.load(in_ptr1 + (4 * x0 + r1 // 2048), rmask & xmask,
            eviction_policy='evict_last', other=0.0)
        tmp2 = 1e-12
        tmp3 = triton_helpers.maximum(tmp1, tmp2)
        tmp4 = tmp0 / tmp3
        tmp5 = tmp4 * tmp4
        tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
        tmp8 = _tmp7 + tmp6
        _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
    tmp7 = tl.sum(_tmp7, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp7, xmask)


@triton.jit
def triton_per_fused_linalg_vector_norm_6(in_out_ptr0, in_ptr0, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = libdevice.sqrt(tmp4)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp5, xmask)


@triton.jit
def triton_poi_fused_div_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x1 = xindex // 32768
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x2 // 2048, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
    tmp2 = 1e-12
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = tmp0 / tmp3
    tmp6 = triton_helpers.maximum(tmp5, tmp2)
    tmp7 = tmp4 / tmp6
    tl.store(out_ptr0 + x2, tmp7, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 2048, 64, 64), (8388608, 4096, 64, 1))
    assert_size_stride(primals_2, (16, 2048, 1, 1), (2048, 1, 1, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (16, 2048), (2048, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
            torch.float32)
        get_raw_stream(0)
        triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0,
            16384, 2048, XBLOCK=64, RBLOCK=64, num_warps=16, num_stages=1)
        buf1 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 1, 131072,
            2048), torch.float32)
        triton_poi_fused_div_1[grid(8192, 4096)](primals_1, buf0, buf1,
            8192, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1)
        del primals_1
        buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 16, 64, 64), (65536, 1, 1024, 16))
        buf3 = buf2
        del buf2
        buf4 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0)
        del buf0
        buf5 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32)
        triton_per_fused__softmax_convolution_2[grid(16384)](buf3,
            primals_3, buf4, buf5, 16384, 16, XBLOCK=32, num_warps=4,
            num_stages=1)
        del primals_3
        buf6 = empty_strided_cuda((4, 16, 2048), (32768, 2048, 1), torch.
            float32)
        triton_red_fused_mul_sub_sum_3[grid(131072)](buf1, primals_4, buf3,
            buf4, buf5, buf6, 131072, 4096, XBLOCK=64, RBLOCK=4,
            num_warps=8, num_stages=1)
        buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 64), torch.float32)
        buf8 = reinterpret_tensor(buf7, (4, 16, 1), (16, 1, 1), 0)
        del buf7
        triton_red_fused_linalg_vector_norm_4[grid(64)](buf8, buf6, 64,
            2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
        buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
        triton_red_fused_linalg_vector_norm_5[grid(16)](buf6, buf8, buf9,
            16, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
        buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf11 = reinterpret_tensor(buf10, (4, 1), (1, 1), 0)
        del buf10
        triton_per_fused_linalg_vector_norm_6[grid(4)](buf11, buf9, 4, 4,
            XBLOCK=1, num_warps=2, num_stages=1)
        del buf9
        buf12 = empty_strided_cuda((4, 32768), (32768, 1), torch.float32)
        triton_poi_fused_div_7[grid(131072)](buf6, buf8, buf11, buf12,
            131072, XBLOCK=512, num_warps=8, num_stages=1)
    return (buf12, primals_2, primals_4, buf1, buf3, buf4, buf5, buf6,
        buf8, buf11)


class NetVLADNew(nn.Module):
    """NetVLAD layer implementation"""

    def __init__(self, num_clusters=16, dim=2048, alpha=30.0,
        normalize_input=True):
        """
        Args:
            num_clusters : int
                The number of clusters
            dim : int
                Dimension of descriptors
            alpha : float
                Parameter of initialization. Larger value is harder assignment.
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to input.
""" super(NetVLADNew, self).__init__() self.num_clusters = num_clusters self.dim = dim self.alpha = alpha self.normalize_input = normalize_input self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True) self.centroids = nn.Parameter(torch.rand(num_clusters, dim)) self._init_params() def _init_params(self): self.conv.weight = nn.Parameter((2.0 * self.alpha * self.centroids) .unsqueeze(-1).unsqueeze(-1)) self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1)) def forward(self, input_0): primals_4 = self.centroids primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
fede-vaccaro/cnnimageretrieval-pytorch
NetVLAD
false
12,656
[ "MIT" ]
0
56bf4ee865e9769801819943f75fff207f0c2f00
https://github.com/fede-vaccaro/cnnimageretrieval-pytorch/tree/56bf4ee865e9769801819943f75fff207f0c2f00
Conv1dWeightNorm
import torch
import torch.nn as nn


class Conv1dWeightNorm(nn.Module):
    """
    Conv1d with weight normalization
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True):
        super(Conv1dWeightNorm, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, dilation=dilation, groups=
            groups, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.conv.weight, mean=0.0, std=0.05)
        if self.conv.bias is not None:
            nn.init.constant_(self.conv.bias, 0)
        self.conv = nn.utils.weight_norm(self.conv)

    def init(self, x, init_scale=1.0):
        with torch.no_grad():
            out = self(x)
            n_channels = out.size(1)
            out = out.transpose(0, 1).contiguous().view(n_channels, -1)
            mean = out.mean(dim=1)
            std = out.std(dim=1)
            inv_stdv = init_scale / (std + 1e-06)
            self.conv.weight_g.mul_(inv_stdv.view(n_channels, 1, 1))
            if self.conv.bias is not None:
                self.conv.bias.add_(-mean).mul_(inv_stdv)
            return self(x)

    def forward(self, input):
        return self.conv(input)

    def extra_repr(self):
        return self.conv.extra_repr()


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0,
    in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = libdevice.sqrt(tmp5)
    tmp8 = tmp7 / tmp6
    tmp9 = tmp0 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2,
            primals_1, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf3 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1,
            4, 4), (16, 4, 1), 0), buf2, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf3, (1, 4, 1), (4, 1, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_1[grid(4)](buf4, primals_3, 4, XBLOCK=
            4, num_warps=1, num_stages=1)
        del primals_3
    return (reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf2, primals_1,
        primals_2, buf1, buf2, reinterpret_tensor(primals_4, (1, 4, 4),
        (16, 4, 1), 0))


class Conv1dWeightNormNew(nn.Module):
    """
    Conv1d with weight normalization
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True):
        super(Conv1dWeightNormNew, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, dilation=dilation, groups=
            groups, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.conv.weight, mean=0.0, std=0.05)
        if self.conv.bias is not None:
            nn.init.constant_(self.conv.bias, 0)
        self.conv = nn.utils.weight_norm(self.conv)

    def init(self, x, init_scale=1.0):
        with torch.no_grad():
            out = self(x)
            n_channels = out.size(1)
            out = out.transpose(0, 1).contiguous().view(n_channels, -1)
            mean = out.mean(dim=1)
            std = out.std(dim=1)
            inv_stdv = init_scale / (std + 1e-06)
            self.conv.weight_g.mul_(inv_stdv.view(n_channels, 1, 1))
            if self.conv.bias is not None:
                self.conv.bias.add_(-mean).mul_(inv_stdv)
            return self(x)

    def extra_repr(self):
        return self.conv.extra_repr()

    def forward(self, input_0):
        primals_3 = self.conv.bias
        primals_1 = self.conv.weight_g
        primals_2 = self.conv.weight_v
        primals_4 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
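# A minimal usage sketch, assuming a CUDA device and the shapes fixed by the
# guards in call(): the first kernel fuses the weight-norm reparameterization
# w = g * v / ||v|| (one row-norm per output channel) with the normalized
# weight materialization; the convolution itself is dispatched to
# extern_kernels.convolution, and the bias is added by the second kernel.
# m = Conv1dWeightNormNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
# out = m(torch.rand(4, 4, device='cuda'))  # treated as (1, 4, 4) -> (4, 1)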
juheeuu/flowseq
Conv1dWeightNorm
false
12,657
[ "Apache-2.0" ]
0
e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
https://github.com/juheeuu/flowseq/tree/e6e50406656335ff7a2f9ed4bd81d7cc7d1195fb
Scaler
import torch
from abc import ABC


class BaseOperator(ABC):
    """
    Abstract class defining the basic structure for operator
    implementations in Hummingbird.
    """

    def __init__(self, regression=False, classification=False, transformer=
        False, anomaly_detection=False, **kwargs):
        super().__init__()
        self.regression = regression
        self.classification = classification
        self.transformer = transformer
        self.anomaly_detection = anomaly_detection


class Scaler(BaseOperator, torch.nn.Module):
    """
    Class implementing Scaler operators in PyTorch. Supported normalizers
    are L1, L2 and Max.
    """

    def __init__(self, offset, scale, device):
        super(Scaler, self).__init__(transformer=True)
        self.offset = offset
        self.scale = scale
        if offset is not None:
            self.offset = torch.nn.Parameter(torch.FloatTensor([offset]),
                requires_grad=False)
        if scale is not None:
            self.scale = torch.nn.Parameter(torch.FloatTensor([scale]),
                requires_grad=False)

    def forward(self, x):
        if self.offset is not None:
            x = x - self.offset
        if self.scale is not None:
            x = x * self.scale
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'offset': 4, 'scale': 1.0, 'device': 0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from abc import ABC
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp3 = tmp0 - tmp2
    tmp6 = tmp3 * tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (1,), (1,))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sub_0[grid(256)](arg1_1, arg0_1, arg2_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf0,


class BaseOperator(ABC):
    """
    Abstract class defining the basic structure for operator
    implementations in Hummingbird.
    """

    def __init__(self, regression=False, classification=False, transformer=
        False, anomaly_detection=False, **kwargs):
        super().__init__()
        self.regression = regression
        self.classification = classification
        self.transformer = transformer
        self.anomaly_detection = anomaly_detection


class ScalerNew(BaseOperator, torch.nn.Module):
    """
    Class implementing Scaler operators in PyTorch. Supported normalizers
    are L1, L2 and Max.
    """

    def __init__(self, offset, scale, device):
        super(ScalerNew, self).__init__(transformer=True)
        self.offset = offset
        self.scale = scale
        if offset is not None:
            self.offset = torch.nn.Parameter(torch.FloatTensor([offset]),
                requires_grad=False)
        if scale is not None:
            self.scale = torch.nn.Parameter(torch.FloatTensor([scale]),
                requires_grad=False)

    def forward(self, input_0):
        arg0_1 = self.offset
        arg2_1 = self.scale
        arg1_1 = input_0
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
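# A minimal usage sketch, assuming a CUDA device: the single fused kernel
# evaluates (x - offset) * scale elementwise, matching the reference
# forward above when both offset and scale are set (the generated call()
# always expects all three tensors).
# s = ScalerNew(offset=4, scale=1.0, device=0).cuda()
# y = s(torch.rand(4, 4, 4, 4, device='cuda'))  # y == x - 4.0 here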
kernc/hummingbird
Scaler
false
12,658
[ "MIT" ]
0
8c9d5b1f19054d521b22ad7fcffa8ee10e405ac3
https://github.com/kernc/hummingbird/tree/8c9d5b1f19054d521b22ad7fcffa8ee10e405ac3
ConConv
import torch
import torch.nn as nn


class ConConv(nn.Module):

    def __init__(self, inplanes_x1, inplanes_x2, planes):
        super(ConConv, self).__init__()
        self.conv = nn.Conv2d(inplanes_x1 + inplanes_x2, planes,
            kernel_size=1, bias=True)

    def forward(self, x1, x2):
        x1 = torch.cat([x2, x1], dim=1)
        x1 = self.conv(x1)
        return x1


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes_x1': 4, 'inplanes_x2': 4, 'planes': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 8, 1, 1), (8, 1, 1, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_4
    return buf2, primals_3, buf0


class ConConvNew(nn.Module):

    def __init__(self, inplanes_x1, inplanes_x2, planes):
        super(ConConvNew, self).__init__()
        self.conv = nn.Conv2d(inplanes_x1 + inplanes_x2, planes,
            kernel_size=1, bias=True)

    def forward(self, input_0, input_1):
        primals_3 = self.conv.weight
        primals_4 = self.conv.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
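# A minimal usage sketch, assuming a CUDA device: the cat kernel writes the
# two 4-channel inputs into an 8-channel buffer, the 1x1 convolution runs
# through extern_kernels.convolution, and the last kernel adds the bias
# in place.
# m = ConConvNew(inplanes_x1=4, inplanes_x2=4, planes=4).cuda()
# a = torch.rand(4, 4, 4, 4, device='cuda')
# b = torch.rand(4, 4, 4, 4, device='cuda')
# out = m(a, b)  # -> (4, 4, 4, 4)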
karoly-hars/DE_hybrid_CNN
ConConv
false
12,659
[ "BSD-3-Clause" ]
0
d74ba4291d6db335151d5262ab96e8e3806a7587
https://github.com/karoly-hars/DE_hybrid_CNN/tree/d74ba4291d6db335151d5262ab96e8e3806a7587
Net_mish_ranger
import torch
import torch.nn.functional as F


def mish(x):
    return x * torch.tanh(F.softplus(x))


class Net_mish_ranger(torch.nn.Module):

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net_mish_ranger, self).__init__()
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden)
        self.hidden2 = torch.nn.Linear(n_hidden, n_hidden)
        self.hidden3 = torch.nn.Linear(n_hidden, n_output)
        self.predict = torch.nn.Linear(n_output, n_output)

    def forward(self, x):
        x = mish(self.hidden1(x))
        x = mish(self.hidden2(x))
        x = mish(self.hidden3(x))
        x = self.predict(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_feature': 4, 'n_hidden': 4, 'n_output': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](buf0, buf1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](buf2, buf3, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](buf4, buf5, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del primals_9
    return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0,
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2,
        reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4,
        reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_8,
        primals_6, primals_4)


def mish(x):
    return x * torch.tanh(F.softplus(x))


class Net_mish_rangerNew(torch.nn.Module):

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net_mish_rangerNew, self).__init__()
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden)
        self.hidden2 = torch.nn.Linear(n_hidden, n_hidden)
        self.hidden3 = torch.nn.Linear(n_hidden, n_output)
        self.predict = torch.nn.Linear(n_output, n_output)

    def forward(self, input_0):
        primals_1 = self.hidden1.weight
        primals_2 = self.hidden1.bias
        primals_4 = self.hidden2.weight
        primals_5 = self.hidden2.bias
        primals_6 = self.hidden3.weight
        primals_7 = self.hidden3.bias
        primals_8 = self.predict.weight
        primals_9 = self.predict.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
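# A minimal usage sketch, assuming a CUDA device: the one fused kernel
# implements mish(x) = x * tanh(softplus(x)), using the same threshold-20
# linearization of softplus as F.softplus for numerical stability; it is
# reused after each of the three hidden addmm layers, while the final
# predict layer is left unactivated.
# net = Net_mish_rangerNew(n_feature=4, n_hidden=4, n_output=4).cuda()
# y = net(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 4, 4, 4)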
kartheikiyer/dense_basis_toolbelt
Net_mish_ranger
false
12,660
[ "MIT" ]
0
5cae6e8f4ea6983fba3625f47413d40d6b3bc6e4
https://github.com/kartheikiyer/dense_basis_toolbelt/tree/5cae6e8f4ea6983fba3625f47413d40d6b3bc6e4