entry_point
stringlengths
1
65
original_triton_python_code
stringlengths
208
619k
optimised_triton_code
stringlengths
1.15k
275k
repo_name
stringlengths
7
115
module_name
stringlengths
1
65
synthetic
bool
1 class
uuid
int64
0
18.5k
licenses
listlengths
1
6
stars
int64
0
19.8k
sha
stringlengths
40
40
repo_link
stringlengths
72
180
AttentionConditioningLayer
import torch import torch.utils.data from torch import nn class ConvNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNorm, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. calculate_gain(w_init_gain)) def forward(self, signal): conv_signal = self.conv(signal) return conv_signal class AttentionConditioningLayer(nn.Module): """Adapted from the LocationLayer in https://github.com/NVIDIA/tacotron2/blob/master/model.py 1D Conv model over a concatenation of the previous attention and the accumulated attention values """ def __init__(self, input_dim=2, attention_n_filters=32, attention_kernel_sizes=[5, 3], attention_dim=640): super(AttentionConditioningLayer, self).__init__() self.location_conv_hidden = ConvNorm(input_dim, attention_n_filters, kernel_size=attention_kernel_sizes[0], padding=None, bias=True, stride=1, dilation=1, w_init_gain='relu') self.location_conv_out = ConvNorm(attention_n_filters, attention_dim, kernel_size=attention_kernel_sizes[1], padding= None, bias=True, stride=1, dilation=1, w_init_gain='sigmoid') self.conv_layers = nn.Sequential(self.location_conv_hidden, nn.ReLU (), self.location_conv_out, nn.Sigmoid()) def forward(self, attention_weights_cat): return self.conv_layers(attention_weights_cat) def get_inputs(): return [torch.rand([4, 2, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 640 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (32, 2, 5), (10, 5, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 2, 64), (128, 64, 1)) assert_size_stride(primals_4, (640, 32, 3), (96, 3, 1)) assert_size_stride(primals_5, (640,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64), (2048, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) 
triton_poi_fused_convolution_relu_0[grid(8192)](buf1, primals_2, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 640, 64), (40960, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_sigmoid_1[grid(163840)](buf3, primals_5, 163840, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf3 class ConvNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNorm, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. 
calculate_gain(w_init_gain)) def forward(self, signal): conv_signal = self.conv(signal) return conv_signal class AttentionConditioningLayerNew(nn.Module): """Adapted from the LocationLayer in https://github.com/NVIDIA/tacotron2/blob/master/model.py 1D Conv model over a concatenation of the previous attention and the accumulated attention values """ def __init__(self, input_dim=2, attention_n_filters=32, attention_kernel_sizes=[5, 3], attention_dim=640): super(AttentionConditioningLayerNew, self).__init__() self.location_conv_hidden = ConvNorm(input_dim, attention_n_filters, kernel_size=attention_kernel_sizes[0], padding=None, bias=True, stride=1, dilation=1, w_init_gain='relu') self.location_conv_out = ConvNorm(attention_n_filters, attention_dim, kernel_size=attention_kernel_sizes[1], padding= None, bias=True, stride=1, dilation=1, w_init_gain='sigmoid') self.conv_layers = nn.Sequential(self.location_conv_hidden, nn.ReLU (), self.location_conv_out, nn.Sigmoid()) def forward(self, input_0): primals_1 = self.location_conv_hidden.conv.weight primals_2 = self.location_conv_hidden.conv.bias primals_4 = self.location_conv_out.conv.weight primals_5 = self.location_conv_out.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
zachwe/flowtron
AttentionConditioningLayer
false
13,171
[ "Apache-2.0" ]
0
28da7fbdb8c2851c835a355ae5cce45cc30bbc84
https://github.com/zachwe/flowtron/tree/28da7fbdb8c2851c835a355ae5cce45cc30bbc84
FastSigmoid
import torch import torch.utils.data import torch import torch.nn as nn class FastSigmoid(nn.Module): def __init__(self): super(FastSigmoid, self).__init__() def forward(self, x): abs = torch.abs(x) + 1 return torch.div(x, abs) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.abs(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = tmp0 / tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_add_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class FastSigmoidNew(nn.Module): def __init__(self): super(FastSigmoidNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zhuxyme/zxySRFBN_CVPR2019
FastSigmoid
false
13,172
[ "MIT" ]
0
c1afe776e7759bc05f2235b6db708e337cf2ae0e
https://github.com/zhuxyme/zxySRFBN_CVPR2019/tree/c1afe776e7759bc05f2235b6db708e337cf2ae0e
LanguageModelCriterion
import torch import torch.nn as nn from torch.autograd import * def to_contiguous(tensor): if tensor.is_contiguous(): return tensor else: return tensor.contiguous() class LanguageModelCriterion(nn.Module): def __init__(self): super(LanguageModelCriterion, self).__init__() def forward(self, input, target, mask): target = target[:, :input.size(1)] mask = mask[:, :input.size(1)] input = to_contiguous(input).view(-1, input.size(2)) target = to_contiguous(target).view(-1, 1) mask = to_contiguous(mask).view(-1, 1) output = -input.gather(1, target) * mask output = torch.sum(output) / torch.sum(mask) return output def get_inputs(): return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4], dtype=torch.int64), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_gather_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr2 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp13 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_gather_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, def to_contiguous(tensor): if tensor.is_contiguous(): return tensor else: 
return tensor.contiguous() class LanguageModelCriterionNew(nn.Module): def __init__(self): super(LanguageModelCriterionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
zhlnhn/ImageNewsMatching
LanguageModelCriterion
false
13,173
[ "MIT" ]
0
a9ebfc5f7669621cfc37510d6d9476a7b7a86eaa
https://github.com/zhlnhn/ImageNewsMatching/tree/a9ebfc5f7669621cfc37510d6d9476a7b7a86eaa
L2Norm
import torch from math import sqrt as sqrt from itertools import product as product import torch.nn as nn import torch.nn.init as init class L2Norm(nn.Module): def __init__(self, n_channels, scale): super(L2Norm, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant(self.weight, self.gamma) def forward(self, x): norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps x /= norm out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x ) * x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels': 4, 'scale': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from math import sqrt as sqrt from itertools import product as product import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp16 * tmp15 tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp17, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_1, primals_2, buf0, buf1, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf1, buf0 class L2NormNew(nn.Module): def __init__(self, n_channels, scale): super(L2NormNew, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant(self.weight, self.gamma) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
zhujiagang/realtime-neg
L2Norm
false
13,174
[ "MIT" ]
0
7e228edc5f2d93d0eee7f3880f0b8473d8c71d27
https://github.com/zhujiagang/realtime-neg/tree/7e228edc5f2d93d0eee7f3880f0b8473d8c71d27
SimpleNN
import torch from torch import nn class SimpleNN(nn.Module): def __init__(self, input_dim): super(SimpleNN, self).__init__() self.linear1 = nn.Linear(input_dim, 50) self.relu = nn.ReLU(inplace=True) self.linear2 = nn.Linear(50, 100) self.out = nn.Linear(100, 1) def forward(self, x): x = self.linear1(x) x = self.relu(x) x = self.linear2(x) x = self.relu(x) x = self.out(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 50 x1 = xindex // 50 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 50 * x1 + 200 * (x1 % 4 // 4) + 800 * (( 4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x3 = xindex // 1600 x5 = xindex % 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) 
tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x5 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 100 x1 = xindex // 100 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 100 * x1 + 400 * (x1 % 4 // 4) + 1600 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (50, 4), (4, 1)) assert_size_stride(primals_2, (50,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (100, 50), (50, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (1, 100), (100, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1, primals_2, buf9, 3200, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 50), (50, 1), torch.float32) triton_poi_fused_view_1[grid(3200)](buf1, buf2, 3200, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (50, 100), (1, 50), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf3 buf8 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 
100, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(6400)](buf4, primals_5, buf8, 6400, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 100), (100, 1), torch.float32) triton_poi_fused_view_3[grid(6400)](buf4, buf5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del buf4 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf7) del primals_7 return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, primals_6, buf8, primals_4, buf9 class SimpleNNNew(nn.Module): def __init__(self, input_dim): super(SimpleNNNew, self).__init__() self.linear1 = nn.Linear(input_dim, 50) self.relu = nn.ReLU(inplace=True) self.linear2 = nn.Linear(50, 100) self.out = nn.Linear(100, 1) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.out.weight primals_7 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
zhaofeng-shu33/Learning_From_Data_2019_Fall
SimpleNN
false
13,175
[ "MIT" ]
0
3e5e1f834c8057817d2e9c3e3fc8d7880fa3a1bd
https://github.com/zhaofeng-shu33/Learning_From_Data_2019_Fall/tree/3e5e1f834c8057817d2e9c3e3fc8d7880fa3a1bd
SimpleMLP
import torch import torch.optim import torch.jit import torch.nn as nn class SimpleMLP(nn.Module): def __init__(self, num_in_features, num_out_features, neurons_per_layer): super(SimpleMLP, self).__init__() self.act = nn.ELU() self.l_in = nn.Linear(in_features=num_in_features, out_features= neurons_per_layer) self.l1 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l2 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l3 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l4 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l5 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l6 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l_out = nn.Linear(in_features=neurons_per_layer, out_features= num_out_features) torch.nn.init.xavier_normal_(self.l_in.weight) torch.nn.init.zeros_(self.l_in.bias) torch.nn.init.xavier_normal_(self.l1.weight) torch.nn.init.zeros_(self.l1.bias) torch.nn.init.xavier_normal_(self.l2.weight) torch.nn.init.zeros_(self.l2.bias) torch.nn.init.xavier_normal_(self.l3.weight) torch.nn.init.zeros_(self.l3.bias) torch.nn.init.xavier_normal_(self.l4.weight) torch.nn.init.zeros_(self.l4.bias) torch.nn.init.xavier_normal_(self.l5.weight) torch.nn.init.zeros_(self.l5.bias) torch.nn.init.xavier_normal_(self.l6.weight) torch.nn.init.zeros_(self.l6.bias) torch.nn.init.xavier_normal_(self.l_out.weight) torch.nn.init.zeros_(self.l_out.bias) def forward(self, x): x = self.act(self.l_in(x)) x = self.act(self.l1(x)) x = self.act(self.l2(x)) x = self.act(self.l3(x)) x = self.act(self.l4(x)) x = self.act(self.l5(x)) x = self.act(self.l6(x)) x = self.l_out(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_in_features': 4, 'num_out_features': 4, 'neurons_per_layer': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.optim import torch.jit import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1, 1), (1, 1)) assert_size_stride(primals_9, (1,), (1,)) assert_size_stride(primals_10, (1, 1), (1, 1)) assert_size_stride(primals_11, (1,), (1,)) assert_size_stride(primals_12, (1, 1), (1, 1)) assert_size_stride(primals_13, (1,), (1,)) assert_size_stride(primals_14, (1, 1), (1, 1)) assert_size_stride(primals_15, (1,), (1,)) assert_size_stride(primals_16, (4, 1), (1, 1)) assert_size_stride(primals_17, (4,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 1), ( 1, 0), 0), primals_4, alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_elu_0[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 1), ( 1, 0), 0), primals_6, alpha=1, beta=1, out=buf7) del primals_7 buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_elu_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf8, (64, 1), ( 1, 0), 0), primals_8, alpha=1, beta=1, out=buf10) del primals_9 buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_elu_0[grid(64)](buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 1), (1, 0), 0), primals_10, alpha=1, beta=1, out=buf13) del primals_11 buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_elu_0[grid(64)](buf13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf14, (64, 1), 
(1, 0), 0), primals_12, alpha=1, beta=1, out=buf16) del primals_13 buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_elu_0[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_15, reinterpret_tensor(buf17, (64, 1), (1, 0), 0), primals_14, alpha=1, beta=1, out=buf19) del primals_15 buf20 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_elu_0[grid(64)](buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) buf21 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_17, reinterpret_tensor(buf20, (64, 1), (1, 0), 0), reinterpret_tensor(primals_16, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf21) del primals_17 return (reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 1), (1, 1), 0), buf4, reinterpret_tensor(buf5, (64, 1), (1, 1), 0), buf7, reinterpret_tensor(buf8, (64, 1), (1, 1), 0), buf10, reinterpret_tensor(buf11, (64, 1), (1, 1), 0), buf13, reinterpret_tensor(buf14, (64, 1), (1, 1), 0), buf16, reinterpret_tensor(buf17, (64, 1), (1, 1), 0), buf19, reinterpret_tensor(buf20, (64, 1), (1, 1), 0), primals_16, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4) class SimpleMLPNew(nn.Module): def __init__(self, num_in_features, num_out_features, neurons_per_layer): super(SimpleMLPNew, self).__init__() self.act = nn.ELU() self.l_in = nn.Linear(in_features=num_in_features, out_features= neurons_per_layer) self.l1 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l2 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l3 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l4 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l5 = 
nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l6 = nn.Linear(in_features=neurons_per_layer, out_features= neurons_per_layer) self.l_out = nn.Linear(in_features=neurons_per_layer, out_features= num_out_features) torch.nn.init.xavier_normal_(self.l_in.weight) torch.nn.init.zeros_(self.l_in.bias) torch.nn.init.xavier_normal_(self.l1.weight) torch.nn.init.zeros_(self.l1.bias) torch.nn.init.xavier_normal_(self.l2.weight) torch.nn.init.zeros_(self.l2.bias) torch.nn.init.xavier_normal_(self.l3.weight) torch.nn.init.zeros_(self.l3.bias) torch.nn.init.xavier_normal_(self.l4.weight) torch.nn.init.zeros_(self.l4.bias) torch.nn.init.xavier_normal_(self.l5.weight) torch.nn.init.zeros_(self.l5.bias) torch.nn.init.xavier_normal_(self.l6.weight) torch.nn.init.zeros_(self.l6.bias) torch.nn.init.xavier_normal_(self.l_out.weight) torch.nn.init.zeros_(self.l_out.bias) def forward(self, input_0): primals_1 = self.l_in.weight primals_2 = self.l_in.bias primals_4 = self.l1.weight primals_5 = self.l1.bias primals_6 = self.l2.weight primals_7 = self.l2.bias primals_8 = self.l3.weight primals_9 = self.l3.bias primals_10 = self.l4.weight primals_11 = self.l4.bias primals_12 = self.l5.weight primals_13 = self.l5.bias primals_14 = self.l6.weight primals_15 = self.l6.bias primals_16 = self.l_out.weight primals_17 = self.l_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
zhaofeng-shu33/deep_euler_tests
SimpleMLP
false
13,176
[ "MIT" ]
0
a3d0961af679d490b0c58873ee0726234122bc7a
https://github.com/zhaofeng-shu33/deep_euler_tests/tree/a3d0961af679d490b0c58873ee0726234122bc7a
PSNR
import torch import torch as th class PSNR(th.nn.Module): def __init__(self): super(PSNR, self).__init__() self.mse = th.nn.MSELoss() def forward(self, out, ref): mse = self.mse(out, ref) return -10 * th.log10(mse + 1e-12) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch as th assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1e-12 tmp10 = tmp8 + tmp9 tmp11 = libdevice.log10(tmp10) tmp12 = -10.0 tmp13 = tmp11 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log10_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class PSNRNew(th.nn.Module): def __init__(self): super(PSNRNew, self).__init__() self.mse = th.nn.MSELoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
zsinsense/demosaicnet
PSNR
false
13,177
[ "MIT" ]
0
bbe8151cab86dbe46b76806cf9ec353994b389ff
https://github.com/zsinsense/demosaicnet/tree/bbe8151cab86dbe46b76806cf9ec353994b389ff
APLoss_dist
import torch import numpy as np import torch.nn as nn def sim_to_dist(scores): return 1 - torch.sqrt(2.001 - 2 * scores) class APLoss(nn.Module): """ Differentiable AP loss, through quantization. From the paper: Learning with Average Precision: Training Image Retrieval with a Listwise Loss Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza https://arxiv.org/abs/1906.07589 Input: (N, M) values in [min, max] label: (N, M) values in {0, 1} Returns: 1 - mAP (mean AP for each n in {1..N}) Note: typically, this is what you wanna minimize """ def __init__(self, nq=25, min=0, max=1): nn.Module.__init__(self) assert isinstance(nq, int) and 2 <= nq <= 100 self.nq = nq self.min = min self.max = max gap = max - min assert gap > 0 self.quantizer = q = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True) q.weight = nn.Parameter(q.weight.detach(), requires_grad=False) q.bias = nn.Parameter(q.bias.detach(), requires_grad=False) a = (nq - 1) / gap q.weight[:nq] = -a q.bias[:nq] = torch.from_numpy(a * min + np.arange(nq, 0, -1)) q.weight[nq:] = a q.bias[nq:] = torch.from_numpy(np.arange(2 - nq, 2, 1) - a * min) q.weight[0] = q.weight[-1] = 0 q.bias[0] = q.bias[-1] = 1 def forward(self, x, label, qw=None, ret='1-mAP'): assert x.shape == label.shape N, M = x.shape q = self.quantizer(x.unsqueeze(1)) q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0) nbs = q.sum(dim=-1) rec = (q * label.view(N, 1, M).float()).sum(dim=-1) prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1)) rec /= rec.sum(dim=-1).unsqueeze(1) ap = (prec * rec).sum(dim=-1) if ret == '1-mAP': if qw is not None: ap *= qw return 1 - ap.mean() elif ret == 'AP': assert qw is None return ap else: raise ValueError('Bad return type for APLoss(): %s' % str(ret)) def measures(self, x, gt, loss=None): if loss is None: loss = self.forward(x, gt) return {'loss_ap': float(loss)} class APLoss_dist(APLoss): def forward(self, x, label, **kw): d = sim_to_dist(x) return APLoss.forward(self, d, label, **kw) def 
get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_rsub_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 2.001 tmp4 = tmp3 - tmp2 tmp5 = libdevice.sqrt(tmp4) tmp6 = 1.0 tmp7 = tmp6 - tmp5 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 25 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0 ) tmp3 = tl.load(in_ptr0 + (100 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr1 + (25 + r1), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 4 * r1 + 200 * x0), rmask & 
xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr0 + (101 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (2 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr0 + (102 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr0 + (3 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr0 + (103 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.minimum(tmp2, tmp5) tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp10 = tmp8 * tmp9 tmp12 = tmp11 + tmp1 tmp14 = tmp13 + tmp4 tmp15 = triton_helpers.minimum(tmp12, tmp14) tmp16 = triton_helpers.maximum(tmp15, tmp7) tmp18 = tmp16 * tmp17 tmp19 = tmp10 + tmp18 tmp21 = tmp20 + tmp1 tmp23 = tmp22 + tmp4 tmp24 = triton_helpers.minimum(tmp21, tmp23) tmp25 = triton_helpers.maximum(tmp24, tmp7) tmp27 = tmp25 * tmp26 tmp28 = tmp19 + tmp27 tmp30 = tmp29 + tmp1 tmp32 = tmp31 + tmp4 tmp33 = triton_helpers.minimum(tmp30, tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp7) tmp36 = tmp34 * tmp35 tmp37 = tmp28 + tmp36 tmp38 = tmp8 + tmp16 tmp39 = tmp38 + tmp25 tmp40 = tmp39 + tmp34 tmp41 = tmp40.to(tl.float32) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp43, = tl.associative_scan((tmp42,), 1, _triton_helper_fn_add0) tmp44 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp46 = tl.where(rmask & xmask, tmp44, 0) tmp47 = tl.sum(tmp46, 1)[:, None] tmp48 = tmp37.to(tl.float32) tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK]) tmp50, = tl.associative_scan((tmp49,), 1, _triton_helper_fn_add0) tmp51 = 1e-16 tmp52 
= tmp43 + tmp51 tmp53 = tmp50 / tmp52 tmp54 = tmp37 / tmp47 tmp55 = tmp53 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(rmask & xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tl.store(in_out_ptr1 + x0, tmp59, xmask) @triton.jit def triton_per_fused_mean_rsub_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 4.0 tmp5 = tmp3 / tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (50, 1, 1), (1, 1, 1)) assert_size_stride(arg3_1, (50,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_rsub_sqrt_0[grid(16)](arg0_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 4 ), (4, 0, 1), 0), arg2_1, stride=(1,), padding=(0,), dilation=( 1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 50, 4), (200, 4, 1)) del arg2_1 del buf0 buf6 = empty_strided_cuda((4,), (1,), torch.float32) buf7 = buf6 del buf6 triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1[grid(4)](buf7, buf1, arg3_1, arg1_1, 4, 25, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del arg3_1 del buf1 buf8 = empty_strided_cuda((), (), torch.float32) buf9 = buf8 del buf8 triton_per_fused_mean_rsub_2[grid(1)](buf9, buf7, 1, 4, XBLOCK=1, num_warps=2, 
num_stages=1) del buf7 return buf9, def sim_to_dist(scores): return 1 - torch.sqrt(2.001 - 2 * scores) class APLoss(nn.Module): """ Differentiable AP loss, through quantization. From the paper: Learning with Average Precision: Training Image Retrieval with a Listwise Loss Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza https://arxiv.org/abs/1906.07589 Input: (N, M) values in [min, max] label: (N, M) values in {0, 1} Returns: 1 - mAP (mean AP for each n in {1..N}) Note: typically, this is what you wanna minimize """ def __init__(self, nq=25, min=0, max=1): nn.Module.__init__(self) assert isinstance(nq, int) and 2 <= nq <= 100 self.nq = nq self.min = min self.max = max gap = max - min assert gap > 0 self.quantizer = q = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True) q.weight = nn.Parameter(q.weight.detach(), requires_grad=False) q.bias = nn.Parameter(q.bias.detach(), requires_grad=False) a = (nq - 1) / gap q.weight[:nq] = -a q.bias[:nq] = torch.from_numpy(a * min + np.arange(nq, 0, -1)) q.weight[nq:] = a q.bias[nq:] = torch.from_numpy(np.arange(2 - nq, 2, 1) - a * min) q.weight[0] = q.weight[-1] = 0 q.bias[0] = q.bias[-1] = 1 def forward(self, x, label, qw=None, ret='1-mAP'): assert x.shape == label.shape N, M = x.shape q = self.quantizer(x.unsqueeze(1)) q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0) nbs = q.sum(dim=-1) rec = (q * label.view(N, 1, M).float()).sum(dim=-1) prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1)) rec /= rec.sum(dim=-1).unsqueeze(1) ap = (prec * rec).sum(dim=-1) if ret == '1-mAP': if qw is not None: ap *= qw return 1 - ap.mean() elif ret == 'AP': assert qw is None return ap else: raise ValueError('Bad return type for APLoss(): %s' % str(ret)) def measures(self, x, gt, loss=None): if loss is None: loss = self.forward(x, gt) return {'loss_ap': float(loss)} class APLoss_distNew(APLoss): def forward(self, input_0, input_1): arg2_1 = self.quantizer.weight arg3_1 = self.quantizer.bias arg0_1 = input_0 arg1_1 = 
input_1 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
zhangxue123/deep-image-retrieval
APLoss_dist
false
13,178
[ "BSD-3-Clause" ]
0
ac188856fa5a034aed3f7ed3fb617d580da44462
https://github.com/zhangxue123/deep-image-retrieval/tree/ac188856fa5a034aed3f7ed3fb617d580da44462
ClusterAssignment
import torch import torch.nn as nn from torch.nn import Parameter from typing import Optional class ClusterAssignment(nn.Module): def __init__(self, cluster_number: 'int', embedding_dimension: 'int', alpha: 'float'=1.0, cluster_centers: 'Optional[torch.Tensor]'=None ) ->None: """ Module to handle the soft assignment, for a description see in 3.1.1. in Xie/Girshick/Farhadi, where the Student's t-distribution is used measure similarity between feature vector and each cluster centroid. :param cluster_number: number of clusters :param embedding_dimension: embedding dimension of feature vectors :param alpha: parameter representing the degrees of freedom in the t-distribution, default 1.0 :param cluster_centers: clusters centers to initialise, if None then use Xavier uniform """ super(ClusterAssignment, self).__init__() self.embedding_dimension = embedding_dimension self.cluster_number = cluster_number self.alpha = alpha if cluster_centers is None: initial_cluster_centers = torch.zeros(self.cluster_number, self .embedding_dimension, dtype=torch.float) nn.init.xavier_uniform_(initial_cluster_centers) else: initial_cluster_centers = cluster_centers self.cluster_centers = Parameter(initial_cluster_centers) def forward(self, batch: 'torch.Tensor') ->torch.Tensor: """ Compute the soft assignment for a batch of feature vectors, returning a batch of assignments for each cluster. :param batch: FloatTensor of [batch size, embedding dimension] :return: FloatTensor [batch size, number of clusters] """ norm_squared = torch.sum((batch.unsqueeze(1) - self.cluster_centers ) ** 2, 2) numerator = 1.0 / (1.0 + norm_squared / self.alpha) power = float(self.alpha + 1) / 2 numerator = numerator ** power return numerator / torch.sum(numerator, dim=1, keepdim=True) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cluster_number': 4, 'embedding_dimension': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Parameter from typing import Optional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 - tmp1 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp9 = tmp8 - tmp1 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12 - tmp1 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = 1.0 tmp17 = tmp15 * tmp16 tmp18 = tmp17 + tmp16 tmp19 = tl.full([1], 1, tl.int32) tmp20 = tmp19 / tmp18 tmp21 = tmp20 * tmp16 tmp22 = tmp21 / tmp21 tl.store(in_out_ptr0 + x2, tmp22, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0[grid(64)](buf1, primals_1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf1, primals_1, primals_2 class 
ClusterAssignmentNew(nn.Module): def __init__(self, cluster_number: 'int', embedding_dimension: 'int', alpha: 'float'=1.0, cluster_centers: 'Optional[torch.Tensor]'=None ) ->None: """ Module to handle the soft assignment, for a description see in 3.1.1. in Xie/Girshick/Farhadi, where the Student's t-distribution is used measure similarity between feature vector and each cluster centroid. :param cluster_number: number of clusters :param embedding_dimension: embedding dimension of feature vectors :param alpha: parameter representing the degrees of freedom in the t-distribution, default 1.0 :param cluster_centers: clusters centers to initialise, if None then use Xavier uniform """ super(ClusterAssignmentNew, self).__init__() self.embedding_dimension = embedding_dimension self.cluster_number = cluster_number self.alpha = alpha if cluster_centers is None: initial_cluster_centers = torch.zeros(self.cluster_number, self .embedding_dimension, dtype=torch.float) nn.init.xavier_uniform_(initial_cluster_centers) else: initial_cluster_centers = cluster_centers self.cluster_centers = Parameter(initial_cluster_centers) def forward(self, input_0): primals_2 = self.cluster_centers primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
zhyhan/pt-dec
ClusterAssignment
false
13,179
[ "MIT" ]
0
52aef59e508c8e7ffdde0fd7bea84570a7571b2a
https://github.com/zhyhan/pt-dec/tree/52aef59e508c8e7ffdde0fd7bea84570a7571b2a
Similarity
import torch import torch.nn as nn import torch.nn.functional as F class Similarity(nn.Module): def __init__(self, cuda, mem_dim, hidden_dim, num_classes): super(Similarity, self).__init__() self.cudaFlag = cuda self.mem_dim = mem_dim self.hidden_dim = hidden_dim self.num_classes = num_classes self.wh = nn.Linear(2 * self.mem_dim, self.hidden_dim) self.wp = nn.Linear(self.hidden_dim, self.num_classes) def forward(self, lvec, rvec): mult_dist = F.torch.mul(lvec, rvec) abs_dist = F.torch.abs(F.torch.add(lvec, -rvec)) vec_dist = F.torch.cat((mult_dist, abs_dist), 1) out = F.sigmoid(self.wh(vec_dist)) out = F.log_softmax(self.wp(out)) return out def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'cuda': False, 'mem_dim': 4, 'hidden_dim': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 * tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr0 + (4 * x1 + (-4 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = -tmp14 tmp16 = tmp13 + tmp15 tmp17 = tl_math.abs(tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp9, tmp19) tl.store(out_ptr0 + x2, tmp20, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_2, primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_sigmoid_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 
= buf3 del buf3 triton_poi_fused__log_softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 return buf5, buf0, buf2, buf5, primals_5 class SimilarityNew(nn.Module): def __init__(self, cuda, mem_dim, hidden_dim, num_classes): super(SimilarityNew, self).__init__() self.cudaFlag = cuda self.mem_dim = mem_dim self.hidden_dim = hidden_dim self.num_classes = num_classes self.wh = nn.Linear(2 * self.mem_dim, self.hidden_dim) self.wp = nn.Linear(self.hidden_dim, self.num_classes) def forward(self, input_0, input_1): primals_3 = self.wh.weight primals_4 = self.wh.bias primals_1 = self.wp.weight primals_6 = self.wp.bias primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
zhu-y11/multilingual_treelstm
Similarity
false
13,180
[ "MIT" ]
0
39c211f3c03db733f776aa8fe73cd615aaa47465
https://github.com/zhu-y11/multilingual_treelstm/tree/39c211f3c03db733f776aa8fe73cd615aaa47465
NonLocalBlock2D
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.model_zoo class NonLocalBlock2D(nn.Module): def __init__(self, in_channels, inter_channels): super(NonLocalBlock2D, self).__init__() self.in_channels = in_channels self.inter_channels = inter_channels self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self. inter_channels, kernel_size=1, stride=1, padding=0) self.W = nn.Conv2d(in_channels=self.inter_channels, out_channels= self.in_channels, kernel_size=1, stride=1, padding=0) nn.init.constant(self.W.weight, 0) nn.init.constant(self.W.bias, 0) self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels= self.inter_channels, kernel_size=1, stride=1, padding=0) self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels= self.inter_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): batch_size = x.size(0) g_x = self.g(x).view(batch_size, self.inter_channels, -1) g_x = g_x.permute(0, 2, 1) theta_x = self.theta(x).view(batch_size, self.inter_channels, -1) theta_x = theta_x.permute(0, 2, 1) phi_x = self.phi(x).view(batch_size, self.inter_channels, -1) f = torch.matmul(theta_x, phi_x) f_div_C = F.softmax(f, dim=1) y = torch.matmul(f_div_C, g_x) y = y.permute(0, 2, 1).contiguous() y = y.view(batch_size, self.inter_channels, *x.size()[2:]) W_y = self.W(y) z = W_y + x return z def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'inter_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.model_zoo assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 256 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__softmax_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 256 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp3 / tmp4 tl.store(in_out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 
4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf4, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32) triton_per_fused__softmax_1[grid(64)](buf5, buf6, buf7, 64, 16, XBLOCK=32, num_warps=4, num_stages=1) buf8 = buf5 del buf5 triton_poi_fused__softmax_2[grid(1024)](buf8, buf6, buf7, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del buf7 buf9 = buf0 del buf0 triton_poi_fused_convolution_0[grid(256)](buf9, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf10 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 16, 4), (64, 1, 16), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 16), (64, 
16, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 16)](buf10, buf11, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del buf10 buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1)) buf13 = buf12 del buf12 triton_poi_fused_add_convolution_4[grid(256)](buf13, primals_9, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 return (buf13, primals_1, primals_2, primals_4, primals_6, primals_8, buf8, reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf9, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0)) class NonLocalBlock2DNew(nn.Module): def __init__(self, in_channels, inter_channels): super(NonLocalBlock2DNew, self).__init__() self.in_channels = in_channels self.inter_channels = inter_channels self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self. 
inter_channels, kernel_size=1, stride=1, padding=0) self.W = nn.Conv2d(in_channels=self.inter_channels, out_channels= self.in_channels, kernel_size=1, stride=1, padding=0) nn.init.constant(self.W.weight, 0) nn.init.constant(self.W.bias, 0) self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels= self.inter_channels, kernel_size=1, stride=1, padding=0) self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels= self.inter_channels, kernel_size=1, stride=1, padding=0) def forward(self, input_0): primals_2 = self.g.weight primals_3 = self.g.bias primals_4 = self.W.weight primals_5 = self.W.bias primals_6 = self.theta.weight primals_7 = self.theta.bias primals_8 = self.phi.weight primals_9 = self.phi.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
zhouhuanxiang/EDSR-PyTorch
NonLocalBlock2D
false
13,181
[ "MIT" ]
0
ca2f0eea49476a0acde59dd76aa4ae257389d98c
https://github.com/zhouhuanxiang/EDSR-PyTorch/tree/ca2f0eea49476a0acde59dd76aa4ae257389d98c
Value
import torch import torch.nn as nn class Value(nn.Module): def __init__(self, num_inputs): super(Value, self).__init__() self.affine1 = nn.Linear(num_inputs, 64) self.affine2 = nn.Linear(64, 64) self.value_head = nn.Linear(64, 1) self.value_head.weight.data.mul_(0.1) self.value_head.bias.data.mul_(0.0) def forward(self, x): x = torch.tanh(self.affine1(x)) x = torch.tanh(self.affine2(x)) state_values = self.value_head(x) return state_values def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (1, 64), (64, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), 
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_7 return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, primals_6, primals_4 class ValueNew(nn.Module): def __init__(self, num_inputs): super(ValueNew, self).__init__() self.affine1 = nn.Linear(num_inputs, 64) self.affine2 = nn.Linear(64, 64) self.value_head = nn.Linear(64, 1) self.value_head.weight.data.mul_(0.1) self.value_head.bias.data.mul_(0.0) def forward(self, input_0): primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.affine2.weight primals_5 = self.affine2.bias primals_6 = self.value_head.weight primals_7 = self.value_head.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
zwc662/Safe_GAIL
Value
false
13,182
[ "MIT" ]
0
536dd73c91d277b418ef04efdd42aa6c87fdad33
https://github.com/zwc662/Safe_GAIL/tree/536dd73c91d277b418ef04efdd42aa6c87fdad33
AutoEncoder
import torch import torch.nn as nn import torch.utils.data class AutoEncoder(nn.Module): def __init__(self, num_question, k=100): """ Initialize a class AutoEncoder. :param num_question: int :param k: int """ super(AutoEncoder, self).__init__() self.g = nn.Linear(num_question, k) self.h = nn.Linear(k, num_question) def get_weight_norm(self): """ Return ||W^1|| + ||W^2||. :return: float """ g_w_norm = torch.norm(self.g.weight, 2) h_w_norm = torch.norm(self.h.weight, 2) return g_w_norm + h_w_norm def forward(self, inputs): """ Return a forward pass given inputs. :param inputs: user vector. :return: user vector. """ tanh = nn.Tanh() out = tanh(self.h(tanh(self.g(inputs)))) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_question': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 100), (100, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del 
primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(6400)](buf1, primals_2, 6400, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_1[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, primals_4 class AutoEncoderNew(nn.Module): def __init__(self, num_question, k=100): """ Initialize a class AutoEncoder. :param num_question: int :param k: int """ super(AutoEncoderNew, self).__init__() self.g = nn.Linear(num_question, k) self.h = nn.Linear(k, num_question) def get_weight_norm(self): """ Return ||W^1|| + ||W^2||. :return: float """ g_w_norm = torch.norm(self.g.weight, 2) h_w_norm = torch.norm(self.h.weight, 2) return g_w_norm + h_w_norm def forward(self, input_0): primals_1 = self.g.weight primals_2 = self.g.bias primals_4 = self.h.weight primals_5 = self.h.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
zuoyuwang/ML-Correctness-prediction
AutoEncoder
false
13,183
[ "MIT" ]
0
15180b73567e61cc7a5dd61b0202a42eca808734
https://github.com/zuoyuwang/ML-Correctness-prediction/tree/15180b73567e61cc7a5dd61b0202a42eca808734
ImgPatches
import torch import torch.nn as nn import torch.utils.data class ImgPatches(nn.Module): def __init__(self, input_channel=3, dim=768, patch_size=4): super().__init__() self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size= patch_size, stride=patch_size) def forward(self, img): patches = self.patch_embed(img).flatten(2).transpose(1, 2) return patches def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 2304 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 768 y1 = yindex // 768 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 196608 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (768, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (768,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((768, 3, 4, 4), (48, 1, 12, 3), torch.float32 ) get_raw_stream(0) triton_poi_fused_0[grid(2304, 16)](primals_1, buf0, 2304, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, buf0, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 768, 16, 16), (196608, 1, 12288, 768)) buf3 = empty_strided_cuda((4, 768, 16, 16), (196608, 256, 16, 1), torch.float32) triton_poi_fused_convolution_2[grid(3072, 256)](buf2, primals_2, buf3, 3072, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf2 del primals_2 return reinterpret_tensor(buf3, (4, 256, 768), (196608, 1, 256), 0 ), buf0, buf1 class ImgPatchesNew(nn.Module): def __init__(self, input_channel=3, dim=768, patch_size=4): super().__init__() self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size= patch_size, stride=patch_size) def forward(self, input_0): primals_1 = self.patch_embed.weight primals_2 = self.patch_embed.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
zoosecretbase/TransGAN
ImgPatches
false
13,184
[ "MIT" ]
0
f2546aec5b80bdddb2c8621a6e011532df3e2d73
https://github.com/zoosecretbase/TransGAN/tree/f2546aec5b80bdddb2c8621a6e011532df3e2d73
SentenceClassificationModule
from torch.nn import Module import torch from torch.nn import functional as F import torch.onnx class SentenceClassificationModule(Module): def __init__(self, input_dimensions: 'int', hidden_dimensions: 'int', dropout: 'float'=0.3): super().__init__() self.layer_1 = torch.nn.Linear(input_dimensions, hidden_dimensions) self.layer_2 = torch.nn.Linear(hidden_dimensions, 1) self.dropout = torch.nn.Dropout(p=dropout) self.norm = torch.nn.LayerNorm(hidden_dimensions) def forward(self, x): x = x.view(x.size(0), -1) x = self.layer_1(x) x = self.norm(x) x = F.relu(x) x = self.dropout(x) x = self.layer_2(x) x = torch.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_dimensions': 4, 'hidden_dimensions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](buf0, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_relu_1[grid(16)](buf0, buf1, buf2, primals_4, primals_5, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del primals_5 buf4 = 
reinterpret_tensor(buf2, (4, 1), (1, 1), 0) del buf2 extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (4, 1), (1, 4 ), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_sigmoid_2[grid(4)](buf5, primals_7, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_7 return buf5, primals_4, primals_1, buf0, buf3, buf5, primals_6 class SentenceClassificationModuleNew(Module): def __init__(self, input_dimensions: 'int', hidden_dimensions: 'int', dropout: 'float'=0.3): super().__init__() self.layer_1 = torch.nn.Linear(input_dimensions, hidden_dimensions) self.layer_2 = torch.nn.Linear(hidden_dimensions, 1) self.dropout = torch.nn.Dropout(p=dropout) self.norm = torch.nn.LayerNorm(hidden_dimensions) def forward(self, input_0): primals_1 = self.layer_1.weight primals_3 = self.layer_1.bias primals_6 = self.layer_2.weight primals_7 = self.layer_2.bias primals_4 = self.norm.weight primals_5 = self.norm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
zolekode/flexudy-multilingual-grammar-checker
SentenceClassificationModule
false
13,185
[ "Apache-2.0" ]
0
86ea35acff0b8eea49d9b1ff9193b69eabc26ef9
https://github.com/zolekode/flexudy-multilingual-grammar-checker/tree/86ea35acff0b8eea49d9b1ff9193b69eabc26ef9
ScaledDotProductAttention
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): """ Compute 'Scaled Dot Product Attention' """ def __init__(self, dropout=0.0): """ :param dropout: attention dropout rate """ super().__init__() self.dropout = dropout def forward(self, query, key, value, mask=None): """ :param query: (batch_num, query_length, d_model) :param key: (batch_num, key_length, d_model) :param value: (batch_num, key_length, d_model) """ d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill_(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) p_attn = F.dropout(p_attn, p=self.dropout) return torch.matmul(p_attn, value), p_attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, 
eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2 class ScaledDotProductAttentionNew(nn.Module): """ Compute 'Scaled Dot Product Attention' """ def __init__(self, dropout=0.0): """ :param dropout: attention dropout rate """ super().__init__() self.dropout = dropout def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
timgianitsos/squad
ScaledDotProductAttention
false
13,186
[ "MIT" ]
0
6ab502652e3528cfeeddfb8eba05221443a35294
https://github.com/timgianitsos/squad/tree/6ab502652e3528cfeeddfb8eba05221443a35294
AdaIN2d
import torch import torch.nn as nn class AdaIN2d(nn.Module): def __init__(self, in_channels, in_features): super(AdaIN2d, self).__init__() self.norm = nn.InstanceNorm2d(in_channels, affine=False, track_running_stats=False) self.net = nn.Linear(in_features, 2 * in_channels) self.reset_parameters() def forward(self, x, h): h = self.net(h) bs, fs = h.size() h.view(bs, fs, 1, 1) b, s = h.chunk(2, 1) x = self.norm(x) return x * (s + 1) + b def reset_parameters(self): nn.init.constant_(self.net.weight, 0.0) nn.init.constant_(self.net.bias, 0.0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r2 = rindex % 4 r3 = rindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (4 + r2 + 8 * r3), None, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr2 + (4 + r2), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr1 + (r2 + 8 * r3), None, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp26 = tmp24 + tmp25 tmp27 = 1.0 tmp28 = tmp26 + tmp27 tmp29 = tmp23 * tmp28 tmp32 = tmp30 + 
tmp31 tmp33 = tmp29 + tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp33, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4, primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del primals_2 return buf5, primals_3, primals_4, buf1, buf4 class AdaIN2dNew(nn.Module): def __init__(self, in_channels, in_features): super(AdaIN2dNew, self).__init__() self.norm = nn.InstanceNorm2d(in_channels, affine=False, track_running_stats=False) self.net = nn.Linear(in_features, 2 * in_channels) self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.net.weight, 0.0) nn.init.constant_(self.net.bias, 0.0) def forward(self, input_0, input_1): primals_1 = self.net.weight primals_2 = self.net.bias primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
wp03052/wolf
AdaIN2d
false
13,187
[ "Apache-2.0" ]
0
49a582cafb829a2642db360c7d94c21439247ec7
https://github.com/wp03052/wolf/tree/49a582cafb829a2642db360c7d94c21439247ec7
Policy
import torch import torch.nn as nn class Policy(nn.Module): def __init__(self, num_inputs, num_outputs, discrete=False): super(Policy, self).__init__() self.discrete = discrete self.affine1 = nn.Linear(num_inputs, 64) self.affine2 = nn.Linear(64, 64) self.action_mean = nn.Linear(64, num_outputs) self.action_mean.weight.data.mul_(0.1) self.action_mean.bias.data.mul_(0.0) if self.discrete: self.action_preds = nn.Softmax() self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs)) self.saved_actions = [] self.rewards = [] self.final_value = 0 def forward(self, x): x = torch.tanh(self.affine1(x)) x = torch.tanh(self.affine2(x)) action_mean = self.action_mean(x) if self.discrete: action_mean = torch.sigmoid(action_mean) action_mean = self.action_preds(action_mean) action_log_std = self.action_log_std.expand_as(action_mean) action_std = torch.exp(action_log_std) return action_mean, action_log_std, action_std def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_outputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x2, tmp1, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), 
(4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0 ), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_6, primals_4 class PolicyNew(nn.Module): def __init__(self, num_inputs, num_outputs, discrete=False): super(PolicyNew, self).__init__() self.discrete = discrete self.affine1 = nn.Linear(num_inputs, 64) self.affine2 = nn.Linear(64, 64) self.action_mean = nn.Linear(64, num_outputs) self.action_mean.weight.data.mul_(0.1) self.action_mean.bias.data.mul_(0.0) if self.discrete: self.action_preds = nn.Softmax() self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs)) self.saved_actions = [] self.rewards = [] self.final_value = 0 def forward(self, input_0): primals_8 = self.action_log_std primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.affine2.weight primals_5 = 
self.affine2.bias primals_6 = self.action_mean.weight primals_7 = self.action_mean.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1], output[2]
zwc662/Safe_GAIL
Policy
false
13,188
[ "MIT" ]
0
536dd73c91d277b418ef04efdd42aa6c87fdad33
https://github.com/zwc662/Safe_GAIL/tree/536dd73c91d277b418ef04efdd42aa6c87fdad33
MIRB3
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, groups=3): super(ConvBlock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=1, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class ConvBlockD(nn.Module): def __init__(self, in_channels, out_channels, groups=3, ker_size=2): super(ConvBlockD, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=ker_size, dilation=ker_size, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class MIRB3(nn.Module): def __init__(self, args): super(MIRB3, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, 
inplace=True) def forward(self, x): res = x c1_1 = self.lrelu(self.conv3_1(res)) c2_1 = self.lrelu(self.convd_1(res)) c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1))) c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1))) c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1))) c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1))) out = self.conv_last(torch.cat([c1_4, c2_4], 1)) out = out + x return out def get_inputs(): return [torch.rand([4, 18, 64, 64])] def get_init_inputs(): return [[], {'args': _mock_config(n_feats=18)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.sqrt(tmp16) tl.store(out_ptr0 + x0, tmp17, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 108 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 6 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, 
tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 18 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 18 rnumel = 9 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask) @triton.jit def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 9 rnumel = 18 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() 
tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 18 x0 = xindex % 4096 x2 = xindex // 73728 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 9, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 36864 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tl.full([1], 18, tl.int64) tmp18 = tl.load(in_ptr2 + (x0 + 4096 * (-9 + x1) + 36864 * x2), tmp15, other=0.0) tmp19 = tl.load(in_ptr3 + (-9 + x1), tmp15, eviction_policy= 'evict_last', other=0.0) tmp20 = tmp18 + tmp19 tmp21 = tmp20 > tmp8 tmp22 = tmp20 * tmp10 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp15, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp14, tmp25) tl.store(out_ptr0 + x3, tmp26, None) @triton.jit def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 18 rnumel = 18 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = 
libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 18 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 9 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58 ) = args args.clear() assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1)) 
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_4, (18,), (1,)) assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_7, (18,), (1,)) assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_10, (9,), (1,)) assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_13, (18,), (1,)) assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_16, (18,), (1,)) assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_19, (9,), (1,)) assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_22, (18,), (1,)) assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_25, (18,), (1,)) assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_28, (9,), (1,)) assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_31, (18,), (1,)) assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_34, (18,), (1,)) assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_37, (9,), (1,)) assert_size_stride(primals_38, (18, 1, 1, 
1), (1, 1, 1, 1)) assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_40, (18,), (1,)) assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_43, (18,), (1,)) assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_46, (9,), (1,)) assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_49, (18,), (1,)) assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_52, (18,), (1,)) assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_55, (9,), (1,)) assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_58, (18,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_3, buf0, 18, XBLOCK=32, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_3, primals_2, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_2[grid(294912)](buf3, primals_4, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_4 buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), 
torch.float32 ) buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf4 buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6, primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf9 buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf10, primals_9, primals_8, buf11, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_12, buf13, 18, XBLOCK=32, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_12, primals_11, buf13, buf14, 108, XBLOCK=128, num_warps=4, num_stages=1) buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf16 = buf15 del buf15 triton_poi_fused_convolution_2[grid(294912)](buf16, primals_13, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf17 = 
empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf17 buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf18, primals_15, primals_14, buf19, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_2[grid(294912)](buf21, primals_16, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_16 buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf22 buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf23, primals_18, primals_17, buf24, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf12, primals_10, buf25, primals_19, buf26, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_21, buf27, 18, XBLOCK=32, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_21, primals_20, buf27, buf28, 108, XBLOCK=128, num_warps=4, num_stages=1) buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf30 = buf29 del buf29 triton_poi_fused_convolution_2[grid(294912)](buf30, primals_22, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_22 buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf31 buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf32, primals_24, primals_23, buf33, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf35 = buf34 del buf34 triton_poi_fused_convolution_2[grid(294912)](buf35, primals_25, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_25 buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf36 buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf37, primals_27, primals_26, buf38, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_30, buf40, 18, XBLOCK=32, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_30, primals_29, buf40, buf41, 108, XBLOCK=128, num_warps=4, num_stages=1) buf42 = 
extern_kernels.convolution(buf26, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_2[grid(294912)](buf43, primals_31, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_31 buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf44 buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf45, primals_33, primals_32, buf46, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf48 = buf47 del buf47 triton_poi_fused_convolution_2[grid(294912)](buf48, primals_34, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_34 buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf49 buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf50, primals_36, primals_35, buf51, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf39, primals_28, buf52, primals_37, buf53, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_39, buf54, 18, XBLOCK=32, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_39, primals_38, buf54, buf55, 108, XBLOCK=128, num_warps=4, num_stages=1) buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf57 = buf56 del buf56 triton_poi_fused_convolution_2[grid(294912)](buf57, primals_40, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_40 buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf58 buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf59, primals_42, primals_41, buf60, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf62 = buf61 del buf61 triton_poi_fused_convolution_2[grid(294912)](buf62, primals_43, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_43 buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf63 buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf64, primals_45, primals_44, buf65, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1)) 
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_48, buf67, 18, XBLOCK=32, num_warps=1, num_stages=1) buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_48, primals_47, buf67, buf68, 108, XBLOCK=128, num_warps=4, num_stages=1) buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf70 = buf69 del buf69 triton_poi_fused_convolution_2[grid(294912)](buf70, primals_49, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_49 buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf71 buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf72, primals_51, primals_50, buf73, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf75 = buf74 del buf74 triton_poi_fused_convolution_2[grid(294912)](buf75, primals_52, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_52 buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf76 buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf77, primals_54, primals_53, buf78, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, 
bias=None) assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf66, primals_46, buf79, primals_55, buf80, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf81 buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_6[grid(18)](buf82, primals_57, primals_56, buf83, 18, 18, XBLOCK=32, num_warps=8, num_stages=1) buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf85 = buf84 del buf84 triton_poi_fused_add_convolution_7[grid(294912)](buf85, primals_58, primals_1, 294912, XBLOCK=1024, num_warps=4, num_stages=1) del primals_58 buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf79, primals_55, buf86, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf79 del primals_55 buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf66, primals_46, buf87, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf66 del primals_46 buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf52, primals_37, buf88, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf52 del primals_37 buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf39, primals_28, buf89, 147456, XBLOCK=1024, 
num_warps=4, num_stages=1) del buf39 del primals_28 buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf25, primals_19, buf90, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf25 del primals_19 buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf12, primals_10, buf91, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf12 del primals_10 return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33, buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73, buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3, buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77, buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91) class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, groups=3): super(ConvBlock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=1, 
groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class ConvBlockD(nn.Module): def __init__(self, in_channels, out_channels, groups=3, ker_size=2): super(ConvBlockD, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=ker_size, dilation=ker_size, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class MIRB3New(nn.Module): def __init__(self, args): super(MIRB3New, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_4 = self.conv3_1.group_conv.bias primals_2 = self.conv3_1.group_conv.weight_g primals_3 = self.conv3_1.group_conv.weight_v primals_7 = self.conv3_1.depth_conv.bias primals_5 = self.conv3_1.depth_conv.weight_g primals_6 = self.conv3_1.depth_conv.weight_v primals_10 = self.conv3_1.point_conv.bias primals_8 = self.conv3_1.point_conv.weight_g primals_9 = self.conv3_1.point_conv.weight_v primals_13 = self.convd_1.group_conv.bias primals_11 = 
self.convd_1.group_conv.weight_g primals_12 = self.convd_1.group_conv.weight_v primals_16 = self.convd_1.depth_conv.bias primals_14 = self.convd_1.depth_conv.weight_g primals_15 = self.convd_1.depth_conv.weight_v primals_19 = self.convd_1.point_conv.bias primals_17 = self.convd_1.point_conv.weight_g primals_18 = self.convd_1.point_conv.weight_v primals_22 = self.conv3_2.group_conv.bias primals_20 = self.conv3_2.group_conv.weight_g primals_21 = self.conv3_2.group_conv.weight_v primals_25 = self.conv3_2.depth_conv.bias primals_23 = self.conv3_2.depth_conv.weight_g primals_24 = self.conv3_2.depth_conv.weight_v primals_28 = self.conv3_2.point_conv.bias primals_26 = self.conv3_2.point_conv.weight_g primals_27 = self.conv3_2.point_conv.weight_v primals_31 = self.convd_2.group_conv.bias primals_29 = self.convd_2.group_conv.weight_g primals_30 = self.convd_2.group_conv.weight_v primals_34 = self.convd_2.depth_conv.bias primals_32 = self.convd_2.depth_conv.weight_g primals_33 = self.convd_2.depth_conv.weight_v primals_37 = self.convd_2.point_conv.bias primals_35 = self.convd_2.point_conv.weight_g primals_36 = self.convd_2.point_conv.weight_v primals_40 = self.conv3_3.group_conv.bias primals_38 = self.conv3_3.group_conv.weight_g primals_39 = self.conv3_3.group_conv.weight_v primals_43 = self.conv3_3.depth_conv.bias primals_41 = self.conv3_3.depth_conv.weight_g primals_42 = self.conv3_3.depth_conv.weight_v primals_46 = self.conv3_3.point_conv.bias primals_44 = self.conv3_3.point_conv.weight_g primals_45 = self.conv3_3.point_conv.weight_v primals_49 = self.convd_3.group_conv.bias primals_47 = self.convd_3.group_conv.weight_g primals_48 = self.convd_3.group_conv.weight_v primals_52 = self.convd_3.depth_conv.bias primals_50 = self.convd_3.depth_conv.weight_g primals_51 = self.convd_3.depth_conv.weight_v primals_55 = self.convd_3.point_conv.bias primals_53 = self.convd_3.point_conv.weight_g primals_54 = self.convd_3.point_conv.weight_v primals_58 = self.conv_last.bias primals_56 
= self.conv_last.weight_g primals_57 = self.conv_last.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58]) return output[0]
wwjfsfs/wwjyyds
MIRB3
false
13,189
[ "MIT" ]
0
80cd6267fde7cd98838078a0d5178a557ceb7414
https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414
Pointer
import torch import torch.nn as nn import torch.nn.functional as F def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class Initialized_Conv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=1, relu=False, bias=False): super().__init__() self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride =stride, padding=padding, groups=groups, bias=bias) if relu is True: self.relu = True nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu') else: self.relu = False nn.init.xavier_uniform_(self.out.weight) def forward(self, x): if self.relu is True: return F.relu(self.out(x)) else: return self.out(x) class Pointer(nn.Module): def __init__(self, d_model): super().__init__() self.w1 = Initialized_Conv1d(d_model * 2, 1) self.w2 = Initialized_Conv1d(d_model * 2, 1) def forward(self, M1, M2, M3, mask): X1 = torch.cat([M1, M2], dim=1) X2 = torch.cat([M1, M3], dim=1) Y1 = mask_logits(self.w1(X1).squeeze(), mask) Y2 = mask_logits(self.w2(X2).squeeze(), mask) return Y1, Y2 def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F

# Inductor guard helpers: shape/stride assertions and raw CUDA allocation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """Fused kernel for the two channel-dim concatenations.

    Writes cat([M1, M2], dim=1) to out_ptr0 and cat([M1, M3], dim=1) to
    out_ptr1 in a single pass; in_ptr0 is shared between both outputs.
    """
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # Decompose the flat index over a (4, 8, 4) output: x1 is the channel.
    x1 = xindex // 4 % 8
    x0 = xindex % 4
    x2 = xindex // 32
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    # Channels 0-3 come from in_ptr0 (M1); channels 4-7 from the other input.
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tmp11 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask,
        other=0.0)
    tmp12 = tl.where(tmp4, tmp5, tmp11)
    tl.store(out_ptr0 + x3, tmp10, xmask)
    tl.store(out_ptr1 + x3, tmp12, xmask)


@triton.jit
def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """Fused mask_logits for both heads: t * m + (1 - m) * -1e30."""
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp8 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp3 = 1.0
    tmp4 = tmp3 - tmp1
    tmp5 = -1e+30
    # (1 - mask) * -1e30 is shared between the two outputs.
    tmp6 = tmp4 * tmp5
    tmp7 = tmp2 + tmp6
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 + tmp6
    tl.store(out_ptr0 + x2, tmp7, xmask)
    tl.store(out_ptr1 + x2, tmp10, xmask)


def call(args):
    """Inductor-generated forward for Pointer.

    args: [M1, M2, M3, w1.weight, mask, w2.weight]; returns the masked
    start/end logits plus tensors saved for backward.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1))
    assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = cat([M1, M2], dim=1), buf1 = cat([M1, M3], dim=1).
        buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, primals_3,
            buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        del primals_3
        # 1x1 Conv1d projections (no bias) via cuDNN/extern convolution.
        buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf2, (4, 1, 4), (4, 4, 1))
        buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf4, (4, 1, 4), (4, 4, 1))
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Apply mask_logits to both projected heads in one kernel launch.
        triton_poi_fused_add_mul_rsub_1[grid(64)](buf2, primals_5, buf4,
            buf3, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf2
        del buf4
    return buf3, buf5, primals_4, primals_5, primals_6, buf0, buf1


def mask_logits(target, mask):
    # Reference (eager) implementation kept for parity with the original
    # module: masked positions get -1e30.
    mask = mask.type(torch.float32)
    return target * mask + (1 - mask) * -1e+30


class Initialized_Conv1d(nn.Module):
    # Conv1d with Kaiming init (+ ReLU) when relu=True, Xavier otherwise.

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
        padding=0, groups=1, relu=False, bias=False):
        super().__init__()
        self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride
            =stride, padding=padding, groups=groups, bias=bias)
        if relu is True:
            self.relu = True
            nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu')
        else:
            self.relu = False
            nn.init.xavier_uniform_(self.out.weight)

    def forward(self, x):
        if self.relu is True:
            return F.relu(self.out(x))
        else:
            return self.out(x)


class PointerNew(nn.Module):
    """Drop-in Pointer replacement whose forward runs the compiled graph."""

    def __init__(self, d_model):
        super().__init__()
        self.w1 = Initialized_Conv1d(d_model * 2, 1)
        self.w2 = Initialized_Conv1d(d_model * 2, 1)

    def forward(self, input_0, input_1, input_2, input_3):
        # Map module parameters/inputs onto the positional layout call()
        # expects (weights interleaved with activations).
        primals_4 = self.w1.out.weight
        primals_6 = self.w2.out.weight
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = input_2
        primals_5 = input_3
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        # Only the two logit tensors are the user-visible result.
        return output[0], output[1]
timgianitsos/squad
Pointer
false
13,190
[ "MIT" ]
0
6ab502652e3528cfeeddfb8eba05221443a35294
https://github.com/timgianitsos/squad/tree/6ab502652e3528cfeeddfb8eba05221443a35294
FSPool
import torch
import torch.nn as nn
import torch.utils.data


def deterministic_sort(s, tau):
    """
    "Stochastic Optimization of Sorting Networks via Continuous Relaxations"
    https://openreview.net/forum?id=H1eSS3CcKX

    Aditya Grover, Eric Wang, Aaron Zweig, Stefano Ermon

    s: input elements to be sorted. Shape: batch_size x n x 1
    tau: temperature for relaxation. Scalar.

    Returns the relaxed permutation matrix (batch_size x n x n).
    """
    n = s.size()[1]
    one = torch.ones((n, 1), dtype=torch.float32, device=s.device)
    # Row sums of pairwise absolute differences |s_i - s_j|.
    A_s = torch.abs(s - s.permute(0, 2, 1))
    B = torch.matmul(A_s, torch.matmul(one, one.transpose(0, 1)))
    # NeuralSort score weights: n + 1 - 2k for k = 1..n.
    scaling = (n + 1 - 2 * (torch.arange(n, device=s.device) + 1)).type(torch
        .float32)
    C = torch.matmul(s, scaling.unsqueeze(0))
    P_max = (C - B).permute(0, 2, 1)
    sm = torch.nn.Softmax(-1)
    # Row-wise softmax at temperature tau relaxes the hard permutation.
    P_hat = sm(P_max / tau)
    return P_hat


def cont_sort(x, perm=None, temp=1):
    """
    Helper function that calls deterministic_sort with the right shape.
    Since it assumes a shape of (batch_size, n, 1) while the input x is of
    shape (batch_size, channels, n), we can get this to the right shape by
    merging the first two dimensions.
    If an existing perm is passed in, we compute the "inverse" (transpose of
    perm) and just use that to unsort x.
    """
    original_size = x.size()
    x = x.view(-1, x.size(2), 1)
    if perm is None:
        perm = deterministic_sort(x, temp)
    else:
        # Relaxed permutations are (approximately) orthogonal, so the
        # transpose acts as the inverse for unsorting.
        perm = perm.transpose(1, 2)
    x = perm.matmul(x)
    x = x.view(original_size)
    return x, perm


def fill_sizes(sizes, x=None):
    """
    sizes is a LongTensor of size [batch_size], containing the set sizes.
    Each set size n is turned into [0/(n-1), 1/(n-1), ..., (n-2)/(n-1), 1,
    0, 0, ..., 0, 0]. These are the ratios r at which f is evaluated at.
    The 0s at the end are there for padding to the largest n in the batch.

    If the input set x is passed in, it guarantees that the mask is the
    correct size even when sizes.max() is less than x.size(), which can be
    a case if there is at least one padding element in each set in the
    batch.

    Returns (ratios clamped to [0, 1], float mask of shape
    (batch, 1, max_size)).
    """
    if x is not None:
        max_size = x.size(2)
    else:
        max_size = sizes.max()
    # FIX: the original allocated a (batch, max_size) tensor filled with -1
    # here and immediately rebound the name on the next line; the dead
    # allocation has been removed (behavior unchanged).
    size_tensor = torch.arange(end=max_size, device=sizes.device, dtype=
        torch.float32)
    # Ratio i/(n-1) per position; clamp(min=1) guards against sets of size
    # 0 or 1 dividing by zero.
    size_tensor = size_tensor.unsqueeze(0) / (sizes.float() - 1).clamp(min=1
        ).unsqueeze(1)
    # Positions past the set's true size have ratio > 1 and are masked out
    # (mask is computed before the clamp below).
    mask = size_tensor <= 1
    mask = mask.unsqueeze(1)
    return size_tensor.clamp(max=1), mask.float()


class FSPool(nn.Module):
    """
    Featurewise sort pooling. From:

    FSPool: Learning Set Representations with Featurewise Sort Pooling.
    Yan Zhang, Jonathon Hare, Adam Prügel-Bennett
    https://arxiv.org/abs/1906.02795
    https://github.com/Cyanogenoid/fspool
    """

    def __init__(self, in_channels, n_pieces, relaxed=False):
        """
        in_channels: Number of channels in input
        n_pieces: Number of pieces in piecewise linear
        relaxed: Use sorting networks relaxation instead of traditional
        sorting
        """
        super().__init__()
        self.n_pieces = n_pieces
        # One piecewise-linear weight function per channel, with
        # n_pieces + 1 knot values.
        self.weight = nn.Parameter(torch.zeros(in_channels, n_pieces + 1))
        self.relaxed = relaxed
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.weight)

    def forward(self, x, n=None):
        """
        FSPool

        x: FloatTensor of shape (batch_size, in_channels, set size).
        This should contain the features of the elements in the set.
        Variable set sizes should be padded to the maximum set size in the
        batch with 0s.

        n: LongTensor of shape (batch_size).
        This tensor contains the sizes of each set in the batch.
        If not specified, assumes that every set has the same size of
        x.size(2). Note that n.max() should never be greater than
        x.size(2), i.e. the specified set size in the n tensor must not be
        greater than the number of elements stored in the x tensor.

        Returns: pooled input x, used permutation matrix perm
        """
        assert x.size(1) == self.weight.size(0
            ), 'incorrect number of input channels in weight'
        if n is None:
            # Every set is assumed full: size == number of stored elements.
            n = x.new(x.size(0)).fill_(x.size(2)).long()
        sizes, mask = fill_sizes(n, x)
        mask = mask.expand_as(x)
        weight = self.determine_weight(sizes)
        # Push padding elements to -inf-ish so they sort to the end.
        x = x + (1 - mask).float() * -99999
        if self.relaxed:
            # self.relaxed doubles as the softmax temperature here.
            x, perm = cont_sort(x, temp=self.relaxed)
        else:
            x, perm = x.sort(dim=2, descending=True)
        # Weighted sum over the sorted elements; mask zeroes the padding.
        x = (x * weight * mask.float()).sum(dim=2)
        return x, perm

    def forward_transpose(self, x, perm, n=None):
        """
        FSUnpool

        x: FloatTensor of shape (batch_size, in_channels)
        perm: Permutation matrix returned by forward function.
        n: LongTensor fo shape (batch_size)
        """
        if n is None:
            n = x.new(x.size(0)).fill_(perm.size(2)).long()
        sizes, mask = fill_sizes(n)
        mask = mask.expand(mask.size(0), x.size(1), mask.size(2))
        weight = self.determine_weight(sizes)
        # Broadcast the pooled vector back over set positions, then unsort.
        x = x.unsqueeze(2) * weight * mask.float()
        if self.relaxed:
            x, _ = cont_sort(x, perm)
        else:
            x = x.scatter(2, perm, x)
        return x, mask

    def determine_weight(self, sizes):
        """
        Piecewise linear function. Evaluates f at the ratios in sizes.
        This should be a faster implementation than doing the sum over max
        terms, since we know that most terms in it are 0.
        """
        weight = self.weight.unsqueeze(0)
        weight = weight.expand(sizes.size(0), weight.size(1), weight.size(2))
        # Map each ratio in [0, 1] onto a continuous knot index in
        # [0, n_pieces], then linearly interpolate between the two
        # neighbouring knots.
        index = self.n_pieces * sizes
        index = index.unsqueeze(1)
        index = index.expand(index.size(0), weight.size(1), index.size(2))
        idx = index.long()
        frac = index.frac()
        left = weight.gather(2, idx)
        # clamp keeps idx + 1 in range when a ratio is exactly 1.
        right = weight.gather(2, (idx + 1).clamp(max=self.n_pieces))
        return (1 - frac) * left + frac * right


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'n_pieces': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.3333333333333333 tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.3333333333333333 tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 4, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_per_fused_add_mul_rsub_sort_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 16 * x1), xmask, other=0.0) tmp1 = x0 
tmp2 = tmp1.to(tl.float32) tmp3 = 0.3333333333333333 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 <= tmp5 tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = -99999.0 tmp10 = tmp8 * tmp9 tmp11 = tmp0 + tmp10 tmp12 = r2 tmp13 = tmp12.to(tl.int16) tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp15 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16, tmp17 = triton_helpers.sort_with_index(tmp14, tmp15, None, 1, stable=False, descending=True) tl.store(out_ptr0 + (x0 + 4 * r2 + 16 * x1), tmp16, xmask) tl.store(out_ptr1 + (x0 + 4 * r2 + 16 * x1), tmp17, xmask) @triton.jit def triton_poi_fused_sort_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.int64) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_frac_gather_mul_rsub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = x0 tmp2 = tmp1.to(tl.float32) tmp3 = 0.3333333333333333 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp9 = tl_math.abs(tmp8) tmp10 = libdevice.floor(tmp9) tmp11 = tl.full([1], 0, tl.int32) tmp12 = tmp11 < tmp8 tmp13 = tmp12.to(tl.int8) tmp14 = tmp8 < tmp11 tmp15 = tmp14.to(tl.int8) tmp16 = tmp13 - tmp15 tmp17 = tmp16.to(tmp8.dtype) tmp18 = tmp10 * tmp17 tmp19 = tmp8 - tmp18 tmp20 = tmp5 - tmp19 tmp21 = tmp8.to(tl.int32) tmp22 = tl.load(in_ptr1 + (tmp21 + 5 * x1), xmask, eviction_policy= 'evict_last') tmp23 = tmp20 * tmp22 tmp24 = tl.full([1], 1, tl.int64) tmp25 = tmp21 + tmp24 tmp26 = tl.full([1], 4, tl.int64) tmp27 = triton_helpers.minimum(tmp25, tmp26) tmp28 = tl.load(in_ptr1 + (tmp27 + 5 * x1), xmask, eviction_policy= 
'evict_last') tmp29 = tmp19 * tmp28 tmp30 = tmp23 + tmp29 tmp31 = tmp0 * tmp30 tmp32 = tmp4 <= tmp5 tmp33 = tmp32.to(tl.float32) tmp34 = tmp31 * tmp33 tl.store(out_ptr0 + x3, tmp34, xmask) @triton.jit def triton_poi_fused_sum_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(64)](buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) triton_poi_fused_add_clamp_1[grid(64)](buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16) triton_per_fused_add_mul_rsub_sort_2[grid(64)](primals_1, buf2, buf3, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) triton_poi_fused_sort_3[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_frac_gather_mul_rsub_4[grid(256)](buf2, primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused_sum_5[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf5 return buf6, buf4, buf0, buf1, buf2 def deterministic_sort(s, tau): """ "Stochastic Optimization of Sorting Networks via Continuous Relaxations" https://openreview.net/forum?id=H1eSS3CcKX Aditya Grover, Eric Wang, Aaron Zweig, Stefano Ermon s: input elements to be sorted. Shape: batch_size x n x 1 tau: temperature for relaxation. Scalar. """ n = s.size()[1] one = torch.ones((n, 1), dtype=torch.float32, device=s.device) A_s = torch.abs(s - s.permute(0, 2, 1)) B = torch.matmul(A_s, torch.matmul(one, one.transpose(0, 1))) scaling = (n + 1 - 2 * (torch.arange(n, device=s.device) + 1)).type(torch .float32) C = torch.matmul(s, scaling.unsqueeze(0)) P_max = (C - B).permute(0, 2, 1) sm = torch.nn.Softmax(-1) P_hat = sm(P_max / tau) return P_hat def cont_sort(x, perm=None, temp=1): """ Helper function that calls deterministic_sort with the right shape. Since it assumes a shape of (batch_size, n, 1) while the input x is of shape (batch_size, channels, n), we can get this to the right shape by merging the first two dimensions. If an existing perm is passed in, we compute the "inverse" (transpose of perm) and just use that to unsort x. """ original_size = x.size() x = x.view(-1, x.size(2), 1) if perm is None: perm = deterministic_sort(x, temp) else: perm = perm.transpose(1, 2) x = perm.matmul(x) x = x.view(original_size) return x, perm def fill_sizes(sizes, x=None): """ sizes is a LongTensor of size [batch_size], containing the set sizes. Each set size n is turned into [0/(n-1), 1/(n-1), ..., (n-2)/(n-1), 1, 0, 0, ..., 0, 0]. These are the ratios r at which f is evaluated at. The 0s at the end are there for padding to the largest n in the batch. If the input set x is passed in, it guarantees that the mask is the correct size even when sizes.max() is less than x.size(), which can be a case if there is at least one padding element in each set in the batch. 
""" if x is not None: max_size = x.size(2) else: max_size = sizes.max() size_tensor = sizes.new(sizes.size(0), max_size).float().fill_(-1) size_tensor = torch.arange(end=max_size, device=sizes.device, dtype= torch.float32) size_tensor = size_tensor.unsqueeze(0) / (sizes.float() - 1).clamp(min=1 ).unsqueeze(1) mask = size_tensor <= 1 mask = mask.unsqueeze(1) return size_tensor.clamp(max=1), mask.float() class FSPoolNew(nn.Module): """ Featurewise sort pooling. From: FSPool: Learning Set Representations with Featurewise Sort Pooling. Yan Zhang, Jonathon Hare, Adam Prügel-Bennett https://arxiv.org/abs/1906.02795 https://github.com/Cyanogenoid/fspool """ def __init__(self, in_channels, n_pieces, relaxed=False): """ in_channels: Number of channels in input n_pieces: Number of pieces in piecewise linear relaxed: Use sorting networks relaxation instead of traditional sorting """ super().__init__() self.n_pieces = n_pieces self.weight = nn.Parameter(torch.zeros(in_channels, n_pieces + 1)) self.relaxed = relaxed self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.weight) def forward_transpose(self, x, perm, n=None): """ FSUnpool x: FloatTensor of shape (batch_size, in_channels) perm: Permutation matrix returned by forward function. n: LongTensor fo shape (batch_size) """ if n is None: n = x.new(x.size(0)).fill_(perm.size(2)).long() sizes, mask = fill_sizes(n) mask = mask.expand(mask.size(0), x.size(1), mask.size(2)) weight = self.determine_weight(sizes) x = x.unsqueeze(2) * weight * mask.float() if self.relaxed: x, _ = cont_sort(x, perm) else: x = x.scatter(2, perm, x) return x, mask def determine_weight(self, sizes): """ Piecewise linear function. Evaluates f at the ratios in sizes. This should be a faster implementation than doing the sum over max terms, since we know that most terms in it are 0. 
""" weight = self.weight.unsqueeze(0) weight = weight.expand(sizes.size(0), weight.size(1), weight.size(2)) index = self.n_pieces * sizes index = index.unsqueeze(1) index = index.expand(index.size(0), weight.size(1), index.size(2)) idx = index.long() frac = index.frac() left = weight.gather(2, idx) right = weight.gather(2, (idx + 1).clamp(max=self.n_pieces)) return (1 - frac) * left + frac * right def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
zzirnheld/dspn
FSPool
false
13,191
[ "MIT" ]
0
e0c248d9e55821847841cf0c67e97225277a6e75
https://github.com/zzirnheld/dspn/tree/e0c248d9e55821847841cf0c67e97225277a6e75
LipschitzCube
import torch import torch.nn as nn class LipschitzCube(nn.Module): def forward(self, x): return (x >= 1) * (x - 2 / 3) + (x <= -1) * (x + 2 / 3) + (x > -1) * (x < 1) * x ** 3 / 3 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 >= tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = 0.6666666666666666 tmp5 = tmp0 - tmp4 tmp6 = tmp3 * tmp5 tmp7 = -1.0 tmp8 = tmp0 <= tmp7 tmp9 = tmp8.to(tl.float32) tmp10 = tmp0 + tmp4 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tmp13 = tmp0 > tmp7 tmp14 = tmp0 < tmp1 tmp15 = tmp13 & tmp14 tmp16 = tmp15.to(tl.float32) tmp17 = tmp0 * tmp0 tmp18 = tmp17 * tmp0 tmp19 = tmp16 * tmp18 tmp20 = 0.3333333333333333 tmp21 = tmp19 * tmp20 tmp22 = tmp12 + tmp21 tl.store(out_ptr0 + x0, tmp22, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class LipschitzCubeNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zxydi1992/residual-flows
LipschitzCube
false
13,192
[ "MIT" ]
0
4ec289681dc91cff5312b22f7ebed93838b440fb
https://github.com/zxydi1992/residual-flows/tree/4ec289681dc91cff5312b22f7ebed93838b440fb
ResNetBlockGroupNorm
import torch import torch.nn as nn def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class ResNetBlockGroupNorm(nn.Module): def __init__(self, inplanes, planes, num_groups, stride=1, activation= 'relu'): super(ResNetBlockGroupNorm, self).__init__() assert activation in ['relu', 'elu', 'leaky_relu'] self.conv1 = conv3x3(inplanes, planes, stride) self.gn1 = nn.GroupNorm(num_groups, planes) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) else: self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1) self.conv2 = conv3x3(planes, planes) self.gn2 = nn.GroupNorm(num_groups, planes) downsample = None if stride != 1 or inplanes != planes: downsample = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False), nn.GroupNorm( num_groups, planes)) self.downsample = downsample self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.gn1.weight, 1.0) nn.init.constant_(self.gn1.bias, 0.0) nn.init.constant_(self.gn2.weight, 1.0) nn.init.constant_(self.gn2.bias, 0.0) if self.downsample is not None: assert isinstance(self.downsample[1], nn.GroupNorm) nn.init.constant_(self.downsample[1].weight, 1.0) nn.init.constant_(self.downsample[1].bias, 0.0) def init(self, x, init_scale=1.0): with torch.no_grad(): return self(x) def forward(self, x): residual = x out = self.conv1(x) out = self.gn1(out) out = self.activation(out) out = self.conv2(out) out = self.gn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.activation(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4, 'num_groups': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, 
tmp10, xmask) @triton.jit def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = tl.full([1, 1], 0, tl.int32) tmp31 = triton_helpers.maximum(tmp30, tmp29) tmp32 = 0.0 tmp33 = tmp31 <= tmp32 tl.store(out_ptr2 + (r1 + 64 * x0), tmp31, xmask) tl.store(out_ptr3 + (r1 + 64 * x0), tmp33, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) 
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_0[grid(4)](buf0, primals_3, primals_4, buf1, buf5, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid (4)](buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12, buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6, reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor( buf10, (4, 1), (1, 1), 0), buf12) def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class ResNetBlockGroupNormNew(nn.Module): def 
__init__(self, inplanes, planes, num_groups, stride=1, activation= 'relu'): super(ResNetBlockGroupNormNew, self).__init__() assert activation in ['relu', 'elu', 'leaky_relu'] self.conv1 = conv3x3(inplanes, planes, stride) self.gn1 = nn.GroupNorm(num_groups, planes) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) else: self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1) self.conv2 = conv3x3(planes, planes) self.gn2 = nn.GroupNorm(num_groups, planes) downsample = None if stride != 1 or inplanes != planes: downsample = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False), nn.GroupNorm( num_groups, planes)) self.downsample = downsample self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.gn1.weight, 1.0) nn.init.constant_(self.gn1.bias, 0.0) nn.init.constant_(self.gn2.weight, 1.0) nn.init.constant_(self.gn2.bias, 0.0) if self.downsample is not None: assert isinstance(self.downsample[1], nn.GroupNorm) nn.init.constant_(self.downsample[1].weight, 1.0) nn.init.constant_(self.downsample[1].bias, 0.0) def init(self, x, init_scale=1.0): with torch.no_grad(): return self(x) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.gn1.weight primals_4 = self.gn1.bias primals_5 = self.conv2.weight primals_6 = self.gn2.weight primals_7 = self.gn2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
wp03052/wolf
ResNetBlockGroupNorm
false
13,193
[ "Apache-2.0" ]
0
49a582cafb829a2642db360c7d94c21439247ec7
https://github.com/wp03052/wolf/tree/49a582cafb829a2642db360c7d94c21439247ec7
DeResNetBlockGroupNorm
import torch import torch.nn as nn def deconv3x3(in_planes, out_planes, stride=1, output_padding=0): """3x3 deconvolution with padding""" return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride= stride, padding=1, output_padding=output_padding, bias=False) class DeResNetBlockGroupNorm(nn.Module): def __init__(self, inplanes, planes, num_groups, stride=1, output_padding=0, activation='relu'): super(DeResNetBlockGroupNorm, self).__init__() assert activation in ['relu', 'elu', 'leaky_relu'] self.deconv1 = deconv3x3(inplanes, planes, stride, output_padding) self.gn1 = nn.GroupNorm(num_groups, planes) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) else: self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1) self.deconv2 = deconv3x3(planes, planes) self.gn2 = nn.GroupNorm(num_groups, planes) downsample = None if stride != 1 or inplanes != planes: downsample = nn.Sequential(nn.ConvTranspose2d(inplanes, planes, kernel_size=1, stride=stride, output_padding=output_padding, bias=False), nn.GroupNorm(num_groups, planes)) self.downsample = downsample self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.gn1.weight, 1.0) nn.init.constant_(self.gn1.bias, 0.0) nn.init.constant_(self.gn2.weight, 1.0) nn.init.constant_(self.gn2.bias, 0.0) if self.downsample is not None: assert isinstance(self.downsample[1], nn.GroupNorm) nn.init.constant_(self.downsample[1].weight, 1.0) nn.init.constant_(self.downsample[1].bias, 0.0) def init(self, x, init_scale=1.0): with torch.no_grad(): return self(x) def forward(self, x): residual = x out = self.deconv1(x) out = self.gn1(out) out = self.activation(out) out = self.deconv2(out) out = self.gn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.activation(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 
'planes': 4, 'num_groups': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, 
tmp10, xmask) @triton.jit def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = tl.full([1, 1], 0, tl.int32) tmp31 = triton_helpers.maximum(tmp30, tmp29) tmp32 = 0.0 tmp33 = tmp31 <= tmp32 tl.store(out_ptr2 + (r1 + 64 * x0), tmp31, xmask) tl.store(out_ptr3 + (r1 + 64 * x0), tmp33, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) 
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_0[grid(4)](buf0, primals_3, primals_4, buf1, buf5, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid (4)](buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12, buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6, reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor( buf10, (4, 1), (1, 1), 0), buf12) def deconv3x3(in_planes, out_planes, stride=1, output_padding=0): """3x3 deconvolution with padding""" return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride= stride, padding=1, output_padding=output_padding, 
bias=False) class DeResNetBlockGroupNormNew(nn.Module): def __init__(self, inplanes, planes, num_groups, stride=1, output_padding=0, activation='relu'): super(DeResNetBlockGroupNormNew, self).__init__() assert activation in ['relu', 'elu', 'leaky_relu'] self.deconv1 = deconv3x3(inplanes, planes, stride, output_padding) self.gn1 = nn.GroupNorm(num_groups, planes) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) else: self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1) self.deconv2 = deconv3x3(planes, planes) self.gn2 = nn.GroupNorm(num_groups, planes) downsample = None if stride != 1 or inplanes != planes: downsample = nn.Sequential(nn.ConvTranspose2d(inplanes, planes, kernel_size=1, stride=stride, output_padding=output_padding, bias=False), nn.GroupNorm(num_groups, planes)) self.downsample = downsample self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.gn1.weight, 1.0) nn.init.constant_(self.gn1.bias, 0.0) nn.init.constant_(self.gn2.weight, 1.0) nn.init.constant_(self.gn2.bias, 0.0) if self.downsample is not None: assert isinstance(self.downsample[1], nn.GroupNorm) nn.init.constant_(self.downsample[1].weight, 1.0) nn.init.constant_(self.downsample[1].bias, 0.0) def init(self, x, init_scale=1.0): with torch.no_grad(): return self(x) def forward(self, input_0): primals_2 = self.deconv1.weight primals_3 = self.gn1.weight primals_4 = self.gn1.bias primals_5 = self.deconv2.weight primals_6 = self.gn2.weight primals_7 = self.gn2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
wp03052/wolf
DeResNetBlockGroupNorm
false
13,194
[ "Apache-2.0" ]
0
49a582cafb829a2642db360c7d94c21439247ec7
https://github.com/wp03052/wolf/tree/49a582cafb829a2642db360c7d94c21439247ec7
FullSort
import torch import torch.nn as nn class FullSort(nn.Module): def forward(self, x): return torch.sort(x, 1)[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0) tmp1 = r2 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5, _tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False) tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_sort_0[grid(64)](arg0_1, buf0, 64, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf0, class FullSortNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zxydi1992/residual-flows
FullSort
false
13,195
[ "MIT" ]
0
4ec289681dc91cff5312b22f7ebed93838b440fb
https://github.com/zxydi1992/residual-flows/tree/4ec289681dc91cff5312b22f7ebed93838b440fb
CNN
import torch import torch.nn as nn import torch.nn.functional as F class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size= 3, padding=1) self.pool = nn.MaxPool2d(kernel_size=2, stride=2) self.fc1 = nn.Linear(8 * 8 * 16, 64) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, 10) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.pool(x) x = self.conv2(x) x = F.relu(x) x = self.pool(x) x = x.view(-1, 8 * 8 * 16) x = self.fc1(x) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc3(x) x = F.log_softmax(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 8 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (8, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1)) 
assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (64, 1024), (1024, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (32, 64), (64, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (10, 32), (32, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(131072)](buf1, primals_2, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(32768)](buf1, buf2, buf3, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(65536)](buf5, primals_5, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((16, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0 ), reinterpret_tensor(primals_6, (1024, 64), (1, 1024), 0), out =buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(1024)](buf9, primals_7, 1024, XBLOCK= 256, 
num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (64, 32), (1, 64), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(512)](buf11, primals_9, 512, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((16, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (32, 10), (1, 32), 0), alpha=1, beta=1, out=buf12) del primals_11 buf15 = empty_strided_cuda((16, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_6[grid(16)](buf12, buf15, 16, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf12 return (buf15, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0), buf9, buf11, buf15, primals_10, primals_8, primals_6) class CNNNew(nn.Module): def __init__(self): super(CNNNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size= 3, padding=1) self.pool = nn.MaxPool2d(kernel_size=2, stride=2) self.fc1 = nn.Linear(8 * 8 * 16, 64) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
zzzzzkjs/quick_draw_clone
CNN
false
13,196
[ "MIT" ]
0
a80d4c03b4cb88e31ae8e143d4042b37cdacc38e
https://github.com/zzzzzkjs/quick_draw_clone/tree/a80d4c03b4cb88e31ae8e143d4042b37cdacc38e
CQAttention
import torch import torch.nn as nn import torch.nn.functional as F def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class CQAttention(nn.Module): def __init__(self, d_model, dropout=0.1): super().__init__() w4C = torch.empty(d_model, 1) w4Q = torch.empty(d_model, 1) w4mlu = torch.empty(1, 1, d_model) nn.init.xavier_uniform_(w4C) nn.init.xavier_uniform_(w4Q) nn.init.xavier_uniform_(w4mlu) self.w4C = nn.Parameter(w4C) self.w4Q = nn.Parameter(w4Q) self.w4mlu = nn.Parameter(w4mlu) bias = torch.empty(1) nn.init.constant_(bias, 0) self.bias = nn.Parameter(bias) self.dropout = dropout def forward(self, C, Q, Cmask, Qmask): C = C.transpose(1, 2) Q = Q.transpose(1, 2) batch_size_c = C.size()[0] _batch_size, Lc, _d_model = C.shape _batch_size, Lq, _d_model = Q.shape S = self.trilinear_for_attention(C, Q) Cmask = Cmask.view(batch_size_c, Lc, 1) Qmask = Qmask.view(batch_size_c, 1, Lq) S1 = F.softmax(mask_logits(S, Qmask), dim=2) S2 = F.softmax(mask_logits(S, Cmask), dim=1) A = torch.bmm(S1, Q) B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C) out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2) return out.transpose(1, 2) def trilinear_for_attention(self, C, Q): _batch_size, Lc, _d_model = C.shape _batch_size, Lq, _d_model = Q.shape dropout = self.dropout C = F.dropout(C, p=dropout, training=self.training) Q = F.dropout(Q, p=dropout, training=self.training) subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq]) subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1] ) subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2)) res = subres0 + subres1 + subres2 res += self.bias return res def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 1]), torch.rand([4, 1, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr2 + x4, xmask) tmp5 = tl.load(in_ptr3 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp8 = tl.load(in_ptr4 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp4 + tmp6 tmp9 = tmp7 * tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp8 tmp12 = -1e+30 tmp13 = tmp11 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp7 * tmp15 tmp17 = tmp10 - tmp15 tmp18 = tmp17 * tmp12 tmp19 = tmp16 + tmp18 tl.store(out_ptr0 + x4, tmp14, xmask) tl.store(out_ptr1 + x4, tmp19, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = 
triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * 
x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex // 16 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (x1 + 4 * (-8 + x0) + 16 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x3 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (x1 + 4 * (-12 + x0) + 16 * x2), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr2 + (4 * x3 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 * tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = 
tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x4, tmp30, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__unsafe_view_clone_0[grid(16, 4)](primals_2, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf2, primals_4, out=buf3) del primals_4 buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_mul_1[grid(64)](primals_1, primals_5, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf4, primals_2, out=buf5) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_2[grid(64)](buf1, buf3, buf5, primals_6, primals_8, primals_7, buf6, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del buf3 del primals_6 buf7 = buf5 del buf5 triton_poi_fused__softmax_3[grid(64)](buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf6 del buf6 
triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf7 del buf7 triton_poi_fused__softmax_5[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = buf9 del buf9 triton_poi_fused__softmax_6[grid(64)](buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = buf10 del buf10 extern_kernels.bmm(buf8, reinterpret_tensor(primals_2, (4, 4, 4), ( 16, 1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf8, reinterpret_tensor(buf11, (4, 4, 4), (16, 1, 4), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf13, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), out=buf14) del buf13 buf15 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_cat_7[grid(256)](primals_1, buf12, buf14, buf15, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del buf14 return reinterpret_tensor(buf15, (4, 16, 4), (64, 1, 16), 0 ), primals_7, primals_8, reinterpret_tensor(primals_1, (4, 4, 4), ( 16, 1, 4), 0), primals_2, buf8, buf11, reinterpret_tensor(buf2, (4, 16), (1, 4), 0), reinterpret_tensor(buf0, (4, 16), (1, 4), 0) def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class CQAttentionNew(nn.Module): def __init__(self, d_model, dropout=0.1): super().__init__() w4C = torch.empty(d_model, 1) w4Q = torch.empty(d_model, 1) w4mlu = torch.empty(1, 1, d_model) nn.init.xavier_uniform_(w4C) nn.init.xavier_uniform_(w4Q) nn.init.xavier_uniform_(w4mlu) self.w4C = nn.Parameter(w4C) self.w4Q = nn.Parameter(w4Q) self.w4mlu = nn.Parameter(w4mlu) bias = torch.empty(1) nn.init.constant_(bias, 0) self.bias = nn.Parameter(bias) self.dropout = dropout def trilinear_for_attention(self, C, Q): _batch_size, Lc, _d_model = C.shape _batch_size, Lq, _d_model = Q.shape dropout = self.dropout C = F.dropout(C, p=dropout, training=self.training) Q = 
F.dropout(Q, p=dropout, training=self.training) subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq]) subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1] ) subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2)) res = subres0 + subres1 + subres2 res += self.bias return res def forward(self, input_0, input_1, input_2, input_3): primals_3 = self.w4C primals_4 = self.w4Q primals_5 = self.w4mlu primals_6 = self.bias primals_1 = input_0 primals_2 = input_1 primals_7 = input_2 primals_8 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
timgianitsos/squad
CQAttention
false
13,197
[ "MIT" ]
0
6ab502652e3528cfeeddfb8eba05221443a35294
https://github.com/timgianitsos/squad/tree/6ab502652e3528cfeeddfb8eba05221443a35294
LipNormConv2d
import torch import torch.nn as nn import torch.nn.functional as F def _max_except_dim(input, dim): maxed = input for axis in range(input.ndimension() - 1, dim, -1): maxed, _ = maxed.max(axis, keepdim=True) for axis in range(dim - 1, -1, -1): maxed, _ = maxed.max(axis, keepdim=True) return maxed def _norm_except_dim(w, norm_type, dim): if norm_type == 1 or norm_type == 2: return torch.norm_except_dim(w, norm_type, dim) elif norm_type == float('inf'): return _max_except_dim(w, dim) def operator_norm_settings(domain, codomain): if domain == 1 and codomain == 1: max_across_input_dims = True norm_type = 1 elif domain == 1 and codomain == 2: max_across_input_dims = True norm_type = 2 elif domain == 1 and codomain == float('inf'): max_across_input_dims = True norm_type = float('inf') elif domain == 2 and codomain == float('inf'): max_across_input_dims = False norm_type = 2 elif domain == float('inf') and codomain == float('inf'): max_across_input_dims = False norm_type = 1 else: raise ValueError('Unknown combination of domain "{}" and codomain "{}"' .format(domain, codomain)) return max_across_input_dims, norm_type def _logit(p): p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p)) return torch.log(p + 1e-10) + torch.log(1 - p + 1e-10) class LipNormConv2d(nn.Conv2d): """Lipschitz constant defined using operator norms.""" def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, domain=float('inf'), codomain=float ('inf'), local_constraint=True, **unused_kwargs): del unused_kwargs super(LipNormConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, bias) self.coeff = coeff self.domain = domain self.codomain = codomain self.local_constraint = local_constraint max_across_input_dims, self.norm_type = operator_norm_settings(self .domain, self.codomain) self.max_across_dim = 1 if max_across_input_dims else 0 with torch.no_grad(): w_scale = _norm_except_dim(self.weight, self.norm_type, dim= 
self.max_across_dim) if not self.local_constraint: w_scale = w_scale.max() self.scale = nn.Parameter(_logit(w_scale / self.coeff)) def compute_weight(self): w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self. max_across_dim) if not self.local_constraint: w_scale = w_scale.max() return self.weight / w_scale * torch.sigmoid(self.scale) def forward(self, input): weight = self.compute_weight() return F.conv2d(input, weight, self.bias, self.stride, self.padding, 1, 1) def extra_repr(self): s = super(LipNormConv2d, self).extra_repr() return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self .coeff, self.domain, self.codomain, self.local_constraint) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'stride': 1, 'padding': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = tmp0 / tmp5 tmp8 = tl.sigmoid(tmp7) tmp9 = tmp6 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) 
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_sigmoid_0[grid(4)](buf1, primals_1, primals_2, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 9, 9), (324, 81, 9, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(1296)](buf4, primals_3, 1296, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf4, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf2 def _max_except_dim(input, dim): maxed = input for axis in range(input.ndimension() - 1, dim, -1): maxed, _ = maxed.max(axis, keepdim=True) for axis in range(dim - 1, -1, -1): maxed, _ = maxed.max(axis, keepdim=True) return maxed def _norm_except_dim(w, norm_type, dim): if norm_type == 1 or norm_type == 2: return torch.norm_except_dim(w, norm_type, dim) elif norm_type == float('inf'): return _max_except_dim(w, dim) def operator_norm_settings(domain, codomain): if domain == 1 and codomain == 1: max_across_input_dims = True norm_type = 1 elif domain == 1 and codomain == 2: max_across_input_dims = True norm_type = 2 elif domain == 1 and codomain == float('inf'): max_across_input_dims = True norm_type = float('inf') elif domain == 2 and codomain == float('inf'): max_across_input_dims = False norm_type = 2 elif domain == float('inf') and codomain == float('inf'): max_across_input_dims = False norm_type = 1 else: raise ValueError('Unknown combination of domain "{}" and codomain "{}"' .format(domain, codomain)) return max_across_input_dims, norm_type def _logit(p): p = torch.max(torch.ones(1) * 0.1, 
torch.min(torch.ones(1) * 0.9, p)) return torch.log(p + 1e-10) + torch.log(1 - p + 1e-10) class LipNormConv2dNew(nn.Conv2d): """Lipschitz constant defined using operator norms.""" def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, domain=float('inf'), codomain=float ('inf'), local_constraint=True, **unused_kwargs): del unused_kwargs super(LipNormConv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, bias) self.coeff = coeff self.domain = domain self.codomain = codomain self.local_constraint = local_constraint max_across_input_dims, self.norm_type = operator_norm_settings(self .domain, self.codomain) self.max_across_dim = 1 if max_across_input_dims else 0 with torch.no_grad(): w_scale = _norm_except_dim(self.weight, self.norm_type, dim= self.max_across_dim) if not self.local_constraint: w_scale = w_scale.max() self.scale = nn.Parameter(_logit(w_scale / self.coeff)) def compute_weight(self): w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self. max_across_dim) if not self.local_constraint: w_scale = w_scale.max() return self.weight / w_scale * torch.sigmoid(self.scale) def extra_repr(self): s = super(LipNormConv2dNew, self).extra_repr() return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self .coeff, self.domain, self.codomain, self.local_constraint) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = self.scale primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
zxydi1992/residual-flows
LipNormConv2d
false
13,198
[ "MIT" ]
0
4ec289681dc91cff5312b22f7ebed93838b440fb
https://github.com/zxydi1992/residual-flows/tree/4ec289681dc91cff5312b22f7ebed93838b440fb
ConvStem2
import torch import torch.nn as nn class ConvStem2(nn.Module): def __init__(self, in_chans=3, out_chans=64, kernel_size=7, stride=2): super(ConvStem2, self).__init__() self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.maxpool(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 16 x0 = xindex % 16 x3 = xindex // 16 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-33 + 2 * x0 + 64 * x3), tmp10, eviction_policy='evict_last', other=float('-inf')) tmp12 = 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-32 + 2 * x0 + 64 * x3), tmp16, eviction_policy='evict_last', other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-31 + 2 * x0 + 64 * x3), tmp23, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 * x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 64 * x3), tmp30, eviction_policy='evict_last', other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (2 * x0 + 64 * x3), tmp33, eviction_policy= 'evict_last', other=float('-inf')) tmp35 = 
triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x3), tmp36, eviction_policy='evict_last', other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + 2 * x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (31 + 2 * x0 + 64 * x3), tmp43, eviction_policy='evict_last', other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x3), tmp46, eviction_policy='evict_last', other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x3), tmp49, eviction_policy='evict_last', other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + x4, tmp51, None) tl.store(out_ptr1 + x4, tmp76, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (64, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), 
padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) buf2 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(65536)](buf0, buf1, buf2, 65536, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf0, buf2 class ConvStem2New(nn.Module): def __init__(self, in_chans=3, out_chans=64, kernel_size=7, stride=2): super(ConvStem2New, self).__init__() self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
yoookoo/cnn-transformer
ConvStem2
false
13,199
[ "Apache-2.0" ]
0
8ee54ea944ed752162e3098db7f8f689ec150efe
https://github.com/yoookoo/cnn-transformer/tree/8ee54ea944ed752162e3098db7f8f689ec150efe
NICEMLPBlock
import torch import torch.nn as nn class LinearWeightNorm(nn.Module): def __init__(self, in_features, out_features, bias=True): super(LinearWeightNorm, self).__init__() self.linear = nn.Linear(in_features, out_features, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.linear.weight, mean=0.0, std=0.05) if self.linear.bias is not None: nn.init.constant_(self.linear.bias, 0) self.linear = nn.utils.weight_norm(self.linear) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def init(self, x, init_scale=1.0): with torch.no_grad(): out = self(x).view(-1, self.linear.out_features) mean = out.mean(dim=0) std = out.std(dim=0) inv_stdv = init_scale / (std + 1e-06) self.linear.weight_g.mul_(inv_stdv.unsqueeze(1)) if self.linear.bias is not None: self.linear.bias.add_(-mean).mul_(inv_stdv) return self(x) def forward(self, input): return self.linear(input) class NICEMLPBlock(nn.Module): def __init__(self, in_features, out_features, hidden_features, activation): super(NICEMLPBlock, self).__init__() assert activation in ['relu', 'elu', 'leaky_relu'] self.fc1 = nn.Linear(in_features, hidden_features, bias=True) self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True) self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) else: self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1) self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.fc1.bias, 0.0) nn.init.constant_(self.fc2.bias, 0.0) def forward(self, x): out = self.activation(self.fc1(x)) out = self.activation(self.fc2(out)) out = self.fc3(out) return out def init(self, x, init_scale=1.0): with torch.no_grad(): out = self.activation(self.fc1(x)) out = self.activation(self.fc2(out)) out = self.fc3.init(out, init_scale=0.0 * 
init_scale) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4, 'hidden_features': 4, 'activation': 'relu'}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') 
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 1), (1, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = 
reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__weight_norm_interface_2[grid(4)](primals_7, buf4, 4, XBLOCK=4, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__weight_norm_interface_3[grid(16)](primals_7, primals_6, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf6, primals_5, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0) del buf6 extern_kernels.addmm(primals_8, buf7, reinterpret_tensor(buf5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_8 return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf5, primals_6, primals_7, reinterpret_tensor(primals_3, (64, 4 ), (4, 1), 0), buf2, buf4, buf7, buf5, buf9, primals_4, buf10 class LinearWeightNorm(nn.Module): def __init__(self, in_features, out_features, bias=True): super(LinearWeightNorm, self).__init__() self.linear = nn.Linear(in_features, out_features, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.linear.weight, mean=0.0, std=0.05) if self.linear.bias is not None: nn.init.constant_(self.linear.bias, 0) self.linear = nn.utils.weight_norm(self.linear) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. 
in_features, self.out_features, self.bias is not None) def init(self, x, init_scale=1.0): with torch.no_grad(): out = self(x).view(-1, self.linear.out_features) mean = out.mean(dim=0) std = out.std(dim=0) inv_stdv = init_scale / (std + 1e-06) self.linear.weight_g.mul_(inv_stdv.unsqueeze(1)) if self.linear.bias is not None: self.linear.bias.add_(-mean).mul_(inv_stdv) return self(x) def forward(self, input): return self.linear(input) class NICEMLPBlockNew(nn.Module): def __init__(self, in_features, out_features, hidden_features, activation): super(NICEMLPBlockNew, self).__init__() assert activation in ['relu', 'elu', 'leaky_relu'] self.fc1 = nn.Linear(in_features, hidden_features, bias=True) self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True) self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) else: self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1) self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.fc1.bias, 0.0) nn.init.constant_(self.fc2.bias, 0.0) def init(self, x, init_scale=1.0): with torch.no_grad(): out = self.activation(self.fc1(x)) out = self.activation(self.fc2(out)) out = self.fc3.init(out, init_scale=0.0 * init_scale) return out def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_8 = self.fc3.linear.bias primals_6 = self.fc3.linear.weight_g primals_7 = self.fc3.linear.weight_v primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
wp03052/wolf
NICEMLPBlock
false
13,200
[ "Apache-2.0" ]
0
49a582cafb829a2642db360c7d94c21439247ec7
https://github.com/wp03052/wolf/tree/49a582cafb829a2642db360c7d94c21439247ec7
LipNormLinear
import torch import torch.nn as nn import torch.nn.functional as F def _max_except_dim(input, dim): maxed = input for axis in range(input.ndimension() - 1, dim, -1): maxed, _ = maxed.max(axis, keepdim=True) for axis in range(dim - 1, -1, -1): maxed, _ = maxed.max(axis, keepdim=True) return maxed def _norm_except_dim(w, norm_type, dim): if norm_type == 1 or norm_type == 2: return torch.norm_except_dim(w, norm_type, dim) elif norm_type == float('inf'): return _max_except_dim(w, dim) def operator_norm_settings(domain, codomain): if domain == 1 and codomain == 1: max_across_input_dims = True norm_type = 1 elif domain == 1 and codomain == 2: max_across_input_dims = True norm_type = 2 elif domain == 1 and codomain == float('inf'): max_across_input_dims = True norm_type = float('inf') elif domain == 2 and codomain == float('inf'): max_across_input_dims = False norm_type = 2 elif domain == float('inf') and codomain == float('inf'): max_across_input_dims = False norm_type = 1 else: raise ValueError('Unknown combination of domain "{}" and codomain "{}"' .format(domain, codomain)) return max_across_input_dims, norm_type def _logit(p): p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p)) return torch.log(p + 1e-10) + torch.log(1 - p + 1e-10) class LipNormLinear(nn.Linear): """Lipschitz constant defined using operator norms.""" def __init__(self, in_features, out_features, bias=True, coeff=0.97, domain=float('inf'), codomain=float('inf'), local_constraint=True, **unused_kwargs): del unused_kwargs super(LipNormLinear, self).__init__(in_features, out_features, bias) self.coeff = coeff self.domain = domain self.codomain = codomain self.local_constraint = local_constraint max_across_input_dims, self.norm_type = operator_norm_settings(self .domain, self.codomain) self.max_across_dim = 1 if max_across_input_dims else 0 with torch.no_grad(): w_scale = _norm_except_dim(self.weight, self.norm_type, dim= self.max_across_dim) if not self.local_constraint: w_scale = 
w_scale.max() self.scale = nn.Parameter(_logit(w_scale / self.coeff)) def compute_weight(self): w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self. max_across_dim) if not self.local_constraint: w_scale = w_scale.max() return self.weight / w_scale * torch.sigmoid(self.scale) * self.coeff def forward(self, input): weight = self.compute_weight() return F.linear(input, weight, self.bias) def extra_repr(self): s = super(LipNormLinear, self).extra_repr() return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self .coeff, self.domain, self.codomain, self.local_constraint) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl_math.abs(tmp1) tmp4 = tl_math.abs(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.abs(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.abs(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp0 / tmp11 tmp14 = tl.sigmoid(tmp13) tmp15 = tmp12 * tmp14 tmp16 = 0.97 tmp17 = tmp15 * tmp16 tl.store(out_ptr0 + x2, tmp17, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_sigmoid_0[grid(16)](primals_1, primals_2, buf0, 16, XBLOCK=16, num_warps=1, 
num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0) def _max_except_dim(input, dim): maxed = input for axis in range(input.ndimension() - 1, dim, -1): maxed, _ = maxed.max(axis, keepdim=True) for axis in range(dim - 1, -1, -1): maxed, _ = maxed.max(axis, keepdim=True) return maxed def _norm_except_dim(w, norm_type, dim): if norm_type == 1 or norm_type == 2: return torch.norm_except_dim(w, norm_type, dim) elif norm_type == float('inf'): return _max_except_dim(w, dim) def operator_norm_settings(domain, codomain): if domain == 1 and codomain == 1: max_across_input_dims = True norm_type = 1 elif domain == 1 and codomain == 2: max_across_input_dims = True norm_type = 2 elif domain == 1 and codomain == float('inf'): max_across_input_dims = True norm_type = float('inf') elif domain == 2 and codomain == float('inf'): max_across_input_dims = False norm_type = 2 elif domain == float('inf') and codomain == float('inf'): max_across_input_dims = False norm_type = 1 else: raise ValueError('Unknown combination of domain "{}" and codomain "{}"' .format(domain, codomain)) return max_across_input_dims, norm_type def _logit(p): p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p)) return torch.log(p + 1e-10) + torch.log(1 - p + 1e-10) class LipNormLinearNew(nn.Linear): """Lipschitz constant defined using operator norms.""" def __init__(self, in_features, out_features, bias=True, coeff=0.97, domain=float('inf'), codomain=float('inf'), local_constraint=True, **unused_kwargs): del unused_kwargs super(LipNormLinearNew, self).__init__(in_features, out_features, bias) self.coeff = coeff self.domain = domain self.codomain = codomain 
self.local_constraint = local_constraint max_across_input_dims, self.norm_type = operator_norm_settings(self .domain, self.codomain) self.max_across_dim = 1 if max_across_input_dims else 0 with torch.no_grad(): w_scale = _norm_except_dim(self.weight, self.norm_type, dim= self.max_across_dim) if not self.local_constraint: w_scale = w_scale.max() self.scale = nn.Parameter(_logit(w_scale / self.coeff)) def compute_weight(self): w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self. max_across_dim) if not self.local_constraint: w_scale = w_scale.max() return self.weight / w_scale * torch.sigmoid(self.scale) * self.coeff def extra_repr(self): s = super(LipNormLinearNew, self).extra_repr() return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self .coeff, self.domain, self.codomain, self.local_constraint) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = self.scale primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
zxydi1992/residual-flows
LipNormLinear
false
13,201
[ "MIT" ]
0
4ec289681dc91cff5312b22f7ebed93838b440fb
https://github.com/zxydi1992/residual-flows/tree/4ec289681dc91cff5312b22f7ebed93838b440fb
FusedConvBN
import math import torch import torch.nn as nn import torch.nn.functional as F import torch.quantization import torch.onnx import torchaudio.functional as F import torch.nn.parallel import torch.utils.data from torch.functional import F import torch.fx import torch.nn import torch.optim import torch.profiler def unsqueeze_all(t): return t[None, :, None, None] def batch_norm_backward(grad_out, X, sum, sqrt_var, N, eps): tmp = ((X - unsqueeze_all(sum) / N) * grad_out).sum(dim=(0, 2, 3)) tmp *= -1 d_denom = tmp / (sqrt_var + eps) ** 2 d_var = d_denom / (2 * sqrt_var) d_mean_dx = grad_out / unsqueeze_all(sqrt_var + eps) d_mean_dx = unsqueeze_all(-d_mean_dx.sum(dim=(0, 2, 3)) / N) grad_input = X * unsqueeze_all(d_var * N) grad_input += unsqueeze_all(-d_var * sum) grad_input *= 2 / ((N - 1) * N) grad_input += d_mean_dx grad_input *= unsqueeze_all(sqrt_var + eps) grad_input += grad_out grad_input /= unsqueeze_all(sqrt_var + eps) return grad_input def convolution_backward(grad_out, X, weight): grad_input = F.conv2d(X.transpose(0, 1), grad_out.transpose(0, 1) ).transpose(0, 1) grad_X = F.conv_transpose2d(grad_out, weight) return grad_X, grad_input class FusedConvBN2DFunction(torch.autograd.Function): @staticmethod def forward(ctx, X, conv_weight, eps=0.001): assert X.ndim == 4 ctx.save_for_backward(X, conv_weight) X = F.conv2d(X, conv_weight) sum = X.sum(dim=(0, 2, 3)) var = X.var(unbiased=True, dim=(0, 2, 3)) N = X.numel() / X.size(1) sqrt_var = torch.sqrt(var) ctx.eps = eps ctx.sum = sum ctx.N = N ctx.sqrt_var = sqrt_var mean = sum / N denom = sqrt_var + eps out = X - unsqueeze_all(mean) out /= unsqueeze_all(denom) return out @staticmethod def backward(ctx, grad_out): X, conv_weight = ctx.saved_tensors X_conv_out = F.conv2d(X, conv_weight) grad_out = batch_norm_backward(grad_out, X_conv_out, ctx.sum, ctx. 
sqrt_var, ctx.N, ctx.eps) grad_X, grad_input = convolution_backward(grad_out, X, conv_weight) return grad_X, grad_input, None, None, None, None, None class FusedConvBN(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, exp_avg_factor=0.1, eps=0.001, device=None, dtype=None): super(FusedConvBN, self).__init__() factory_kwargs = {'device': device, 'dtype': dtype} weight_shape = out_channels, in_channels, kernel_size, kernel_size self.conv_weight = nn.Parameter(torch.empty(*weight_shape, ** factory_kwargs)) num_features = out_channels self.num_features = num_features self.eps = eps self.reset_parameters() def forward(self, X): return FusedConvBN2DFunction.apply(X, self.conv_weight, self.eps) def reset_parameters(self) ->None: nn.init.kaiming_uniform_(self.conv_weight, a=math.sqrt(5)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn import torch.nn.functional as F import torch.quantization import torch.onnx import torchaudio.functional as F import torch.nn.parallel import torch.utils.data from torch.functional import F import torch.fx import torch.nn import torch.optim import torch.profiler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.25 tmp9 = tmp7 * tmp8 tmp10 = tmp0 - tmp9 tmp11 = 4.0 tmp12 = tmp7 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp2 - tmp12 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp4 - tmp12 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp6 - tmp12 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 0.001 tmp28 = tmp26 + tmp27 tmp29 = tmp10 / tmp28 tl.store(out_ptr0 + x2, tmp29, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 
1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) del primals_1 buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, buf0, reinterpret_tensor(primals_2, (4, 4, 4, 4), (16, 64, 4, 1), 0) def unsqueeze_all(t): return t[None, :, None, None] def batch_norm_backward(grad_out, X, sum, sqrt_var, N, eps): tmp = ((X - unsqueeze_all(sum) / N) * grad_out).sum(dim=(0, 2, 3)) tmp *= -1 d_denom = tmp / (sqrt_var + eps) ** 2 d_var = d_denom / (2 * sqrt_var) d_mean_dx = grad_out / unsqueeze_all(sqrt_var + eps) d_mean_dx = unsqueeze_all(-d_mean_dx.sum(dim=(0, 2, 3)) / N) grad_input = X * unsqueeze_all(d_var * N) grad_input += unsqueeze_all(-d_var * sum) grad_input *= 2 / ((N - 1) * N) grad_input += d_mean_dx grad_input *= unsqueeze_all(sqrt_var + eps) grad_input += grad_out grad_input /= unsqueeze_all(sqrt_var + eps) return grad_input def convolution_backward(grad_out, X, weight): grad_input = F.conv2d(X.transpose(0, 1), grad_out.transpose(0, 1) ).transpose(0, 1) grad_X = F.conv_transpose2d(grad_out, weight) return grad_X, grad_input class FusedConvBN2DFunction(torch.autograd.Function): @staticmethod def forward(ctx, X, conv_weight, eps=0.001): assert X.ndim == 4 ctx.save_for_backward(X, conv_weight) X = F.conv2d(X, conv_weight) sum = X.sum(dim=(0, 2, 3)) var = X.var(unbiased=True, dim=(0, 2, 3)) N = X.numel() / X.size(1) sqrt_var = torch.sqrt(var) ctx.eps = eps ctx.sum = sum ctx.N = N ctx.sqrt_var = sqrt_var mean = sum / N denom = sqrt_var + eps out = X - unsqueeze_all(mean) out /= unsqueeze_all(denom) return out @staticmethod def backward(ctx, grad_out): X, 
conv_weight = ctx.saved_tensors X_conv_out = F.conv2d(X, conv_weight) grad_out = batch_norm_backward(grad_out, X_conv_out, ctx.sum, ctx. sqrt_var, ctx.N, ctx.eps) grad_X, grad_input = convolution_backward(grad_out, X, conv_weight) return grad_X, grad_input, None, None, None, None, None class FusedConvBNNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, exp_avg_factor=0.1, eps=0.001, device=None, dtype=None): super(FusedConvBNNew, self).__init__() factory_kwargs = {'device': device, 'dtype': dtype} weight_shape = out_channels, in_channels, kernel_size, kernel_size self.conv_weight = nn.Parameter(torch.empty(*weight_shape, ** factory_kwargs)) num_features = out_channels self.num_features = num_features self.eps = eps self.reset_parameters() def reset_parameters(self) ->None: nn.init.kaiming_uniform_(self.conv_weight, a=math.sqrt(5)) def forward(self, input_0): primals_1 = self.conv_weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
youkaichao/tutorials
FusedConvBN
false
13,202
[ "BSD-3-Clause" ]
0
af34b10b70d99659eb016a2a1d5c31b9ae8ba3da
https://github.com/youkaichao/tutorials/tree/af34b10b70d99659eb016a2a1d5c31b9ae8ba3da
BeitPooler
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.utils.checkpoint class BeitPooler(nn.Module): def __init__(self, config): super().__init__() self.layernorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) if config.use_mean_pooling else None def forward(self, hidden_states): if self.layernorm is not None: patch_tokens = hidden_states[:, 1:, :] pooled_output = self.layernorm(patch_tokens.mean(1)) else: pooled_output = hidden_states[:, 0] return pooled_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(use_mean_pooling=4, hidden_size=4, layer_norm_eps=1)}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (16 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (32 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (48 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (17 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (33 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (49 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (18 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (34 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (50 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (19 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (35 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (51 + 4 * x0 + 64 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 3.0 tmp6 = tmp4 / tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp11 / tmp5 tmp13 = tmp6 + tmp12 tmp16 = tmp14 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp18 / tmp5 tmp20 = tmp13 + tmp19 tmp23 = 
tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp25 / tmp5 tmp27 = tmp20 + tmp26 tmp28 = 4.0 tmp29 = tmp27 / tmp28 tmp30 = tmp6 - tmp29 tmp31 = tmp30 * tmp30 tmp32 = tmp12 - tmp29 tmp33 = tmp32 * tmp32 tmp34 = tmp31 + tmp33 tmp35 = tmp19 - tmp29 tmp36 = tmp35 * tmp35 tmp37 = tmp34 + tmp36 tmp38 = tmp26 - tmp29 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp41 = tmp40 / tmp28 tl.store(out_ptr0 + x2, tmp29, xmask) tl.store(out_ptr1 + x2, tmp41, xmask) @triton.jit def triton_poi_fused_mean_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x4 = xindex % 16 x3 = xindex // 4 x5 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (16 + x4 + 64 * x2), xmask) tmp1 = tl.load(in_ptr0 + (32 + x4 + 64 * x2), xmask) tmp3 = tl.load(in_ptr0 + (48 + x4 + 64 * x2), xmask) tmp7 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 3.0 tmp6 = tmp4 / tmp5 tmp8 = tmp6 - tmp7 tmp10 = 1.0 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp8 * tmp12 tmp15 = tmp13 * tmp14 tmp17 = tmp15 + tmp16 tl.store(out_ptr0 + x5, tmp13, xmask) tl.store(out_ptr1 + x5, tmp17, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_mean_native_layer_norm_0[grid(16)](primals_1, 
buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mean_native_layer_norm_1[grid(64)](primals_1, buf0, buf1, primals_2, primals_3, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 del primals_3 return buf3, buf2 class BeitPoolerNew(nn.Module): def __init__(self, config): super().__init__() self.layernorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) if config.use_mean_pooling else None def forward(self, input_0): primals_2 = self.layernorm.weight primals_3 = self.layernorm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Clemens123/transformers
BeitPooler
false
13,203
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
AttDec
from _paritybench_helpers import _mock_config import torch import torch.nn as nn def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class AttDec(nn.Module): def __init__(self, opt, attSize): super(AttDec, self).__init__() self.embedSz = 0 self.fc1 = nn.Linear(opt.resSize + self.embedSz, opt.ngh) self.fc3 = nn.Linear(opt.ngh, attSize) self.lrelu = nn.LeakyReLU(0.2, True) self.hidden = None self.sigmoid = None self.apply(weights_init) def forward(self, feat, att=None): h = feat if self.embedSz > 0: assert att is not None, 'Conditional Decoder requires attribute input' h = torch.cat((feat, att), 1) self.hidden = self.lrelu(self.fc1(h)) h = self.fc3(self.hidden) if self.sigmoid is not None: h = self.sigmoid(h) else: h = h / h.pow(2).sum(1).sqrt().unsqueeze(1).expand(h.size(0), h .size(1)) self.out = h return h def getLayersOutDet(self): return self.hidden.detach() def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(resSize=4, ngh=4), 'attSize': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) 
assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf3, buf1, primals_1, buf1, buf2, primals_4 def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class AttDecNew(nn.Module): def __init__(self, opt, attSize): super(AttDecNew, self).__init__() self.embedSz = 0 self.fc1 = nn.Linear(opt.resSize + self.embedSz, opt.ngh) self.fc3 = nn.Linear(opt.ngh, attSize) self.lrelu = nn.LeakyReLU(0.2, True) self.hidden = None self.sigmoid = None self.apply(weights_init) def getLayersOutDet(self): return self.hidden.detach() def forward(self, input_0): primals_1 = self.fc1.weight primals_3 = self.fc1.bias primals_2 = self.fc3.weight primals_5 = self.fc3.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
IacoSimoncini/tfvaegan
AttDec
false
13,204
[ "MIT" ]
0
157b526d65d0b0d5412f4be6fed02fc7d6325827
https://github.com/IacoSimoncini/tfvaegan/tree/157b526d65d0b0d5412f4be6fed02fc7d6325827
DeiTAttention
from _paritybench_helpers import _mock_config import math import torch from typing import List from typing import Tuple from torch import nn from typing import Set import torch.utils.checkpoint def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int', head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking :obj:`already_pruned_heads` into account. Args: heads (:obj:`List[int]`): List of the indices of heads to prune. n_heads (:obj:`int`): The number of heads in the model. head_size (:obj:`int`): The size of each head. already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. Returns: :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. """ mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads for head in heads: head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long() return heads, index def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim: 'int'=0) ->nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`torch.nn.Linear`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. 
""" index = index W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None ) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer class DeiTSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size {config.hidden_size,} is not a multiple of the number of attention heads {config.num_attention_heads}.' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. 
attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, head_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs class DeiTSelfOutput(nn.Module): """ The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class DeiTAttention(nn.Module): def __init__(self, config): super().__init__() self.attention = DeiTSelfAttention(config) self.output = DeiTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self. attention.num_attention_heads, self.attention. 
attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = (self.attention. num_attention_heads - len(heads)) self.attention.all_head_size = (self.attention.attention_head_size * self.attention.num_attention_heads) self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, head_mask=None, output_attentions=False): self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from typing import List from typing import Tuple from torch import nn from typing import Set import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = 
triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] 
xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_9 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 
1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8 def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int', head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking :obj:`already_pruned_heads` into account. Args: heads (:obj:`List[int]`): List of the indices of heads to prune. n_heads (:obj:`int`): The number of heads in the model. head_size (:obj:`int`): The size of each head. already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. Returns: :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. """ mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads for head in heads: head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long() return heads, index def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim: 'int'=0) ->nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`torch.nn.Linear`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. 
""" index = index W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None ) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer class DeiTSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size {config.hidden_size,} is not a multiple of the number of attention heads {config.num_attention_heads}.' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. 
attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, head_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs class DeiTSelfOutput(nn.Module): """ The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class DeiTAttentionNew(nn.Module): def __init__(self, config): super().__init__() self.attention = DeiTSelfAttention(config) self.output = DeiTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self. attention.num_attention_heads, self.attention. 
attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = (self.attention. num_attention_heads - len(heads)) self.attention.all_head_size = (self.attention.attention_head_size * self.attention.num_attention_heads) self.pruned_heads = self.pruned_heads.union(heads) def forward(self, input_0): primals_1 = self.attention.query.weight primals_2 = self.attention.query.bias primals_4 = self.attention.key.weight primals_5 = self.attention.key.bias primals_6 = self.attention.value.weight primals_7 = self.attention.value.bias primals_8 = self.output.dense.weight primals_9 = self.output.dense.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Clemens123/transformers
DeiTAttention
false
13,205
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
Discriminator_D1
from _paritybench_helpers import _mock_config import torch import torch.nn as nn def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class Discriminator_D1(nn.Module): def __init__(self, opt): super(Discriminator_D1, self).__init__() self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh) self.fc2 = nn.Linear(opt.ndh, 1) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, x, att): h = torch.cat((x, att), 1) self.hidden = self.lrelu(self.fc1(h)) h = self.fc2(self.hidden) return h def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(resSize=4, attSize=4, ndh=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_leaky_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return buf4, buf2, buf0, buf2, primals_5 def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class Discriminator_D1New(nn.Module): def __init__(self, opt): super(Discriminator_D1New, self).__init__() self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh) self.fc2 = nn.Linear(opt.ndh, 1) 
self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
IacoSimoncini/tfvaegan
Discriminator_D1
false
13,206
[ "MIT" ]
0
157b526d65d0b0d5412f4be6fed02fc7d6325827
https://github.com/IacoSimoncini/tfvaegan/tree/157b526d65d0b0d5412f4be6fed02fc7d6325827
SPPblock
import torch import torch.nn as nn import torch.nn.functional as F class SPPblock(nn.Module): def __init__(self, in_channels): super(SPPblock, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2) self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3) self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5) self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6) self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0) def forward(self, x): self.in_channels, h, w = x.size(1), x.size(2), x.size(3) self.layer1 = F.upsample(self.conv(self.pool1(x)), size=(h, w), mode='bilinear') self.layer2 = F.upsample(self.conv(self.pool2(x)), size=(h, w), mode='bilinear') self.layer3 = F.upsample(self.conv(self.pool3(x)), size=(h, w), mode='bilinear') self.layer4 = F.upsample(self.conv(self.pool4(x)), size=(h, w), mode='bilinear') out = torch.cat([self.layer1, self.layer2, self.layer3, self.layer4, x], 1) return out def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 
tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 31, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 7056 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 21 x1 = xindex // 21 % 21 x4 = xindex // 441 x3 = xindex // 1764 x5 = xindex % 1764 tmp0 = tl.load(in_ptr0 + (3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (64 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (65 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (66 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (128 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (129 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, 
eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (130 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + (x5 + 1792 * x3), tmp16, xmask) @triton.jit def triton_poi_fused__to_copy_5(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.328125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_6(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.328125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 20, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.328125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) 
tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 % 12 x2 = xindex // 144 x3 = xindex tmp0 = tl.load(in_ptr0 + (5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (64 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (65 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (66 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (67 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (68 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (128 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (129 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (130 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (131 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (132 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp29 = 
tl.load(in_ptr0 + (192 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (193 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (194 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr0 + (195 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr0 + (196 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr0 + (256 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr0 + (257 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr0 + (258 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr0 + (259 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp47 = tl.load(in_ptr0 + (260 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp32 = triton_helpers.maximum(tmp31, tmp30) tmp34 = triton_helpers.maximum(tmp33, tmp32) tmp36 = triton_helpers.maximum(tmp35, tmp34) tmp38 = triton_helpers.maximum(tmp37, tmp36) tmp40 = triton_helpers.maximum(tmp39, tmp38) tmp42 = triton_helpers.maximum(tmp41, tmp40) tmp44 = triton_helpers.maximum(tmp43, tmp42) tmp46 = 
triton_helpers.maximum(tmp45, tmp44) tmp48 = triton_helpers.maximum(tmp47, tmp46) tl.store(out_ptr0 + x3, tmp48, xmask) @triton.jit def triton_poi_fused__to_copy_9(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.1875 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_10(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.1875 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 11, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.1875 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 
tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.15625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.15625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 9, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.15625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_15(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 
% 64 x0 = xindex % 64 x2 = xindex // 4096 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + 0) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp13 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp49 = tl.load(in_ptr11 + x0, None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr12 + x0, None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr13 + x1, None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr14 + x1, None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr15 + x1, None, eviction_policy='evict_last') tmp79 = tl.load(in_ptr16 + x0, None, eviction_policy='evict_last') tmp85 = tl.load(in_ptr18 + x0, None, eviction_policy='evict_last') tmp92 = tl.load(in_ptr19 + x0, None, eviction_policy='evict_last') tmp95 = tl.load(in_ptr20 + x1, None, eviction_policy='evict_last') tmp107 = tl.load(in_ptr21 + x1, None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr22 + x1, None, eviction_policy='evict_last') tmp115 = tl.load(in_ptr23 + x0, None, eviction_policy='evict_last') tmp121 = tl.load(in_ptr25 + x0, None, eviction_policy='evict_last') tmp128 = tl.load(in_ptr26 + x0, None, eviction_policy='evict_last') tmp131 = tl.load(in_ptr27 + x1, None, eviction_policy='evict_last') tmp143 = tl.load(in_ptr28 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x2), None, eviction_policy='evict_last') 
tmp12 = tmp9 + tmp11 tmp14 = tmp13 + tmp1 tmp15 = tmp13 < 0 tmp16 = tl.where(tmp15, tmp14, tmp13) tmp17 = tl.load(in_ptr2 + (tmp16 + 32 * tmp4 + 1024 * x2), None, eviction_policy='evict_last') tmp18 = tmp17 + tmp11 tmp19 = tmp18 - tmp12 tmp21 = tmp19 * tmp20 tmp22 = tmp12 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp23 < 0 tmp26 = tl.where(tmp25, tmp24, tmp23) tmp27 = tl.load(in_ptr2 + (tmp8 + 32 * tmp26 + 1024 * x2), None, eviction_policy='evict_last') tmp28 = tmp27 + tmp11 tmp29 = tl.load(in_ptr2 + (tmp16 + 32 * tmp26 + 1024 * x2), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp11 tmp31 = tmp30 - tmp28 tmp32 = tmp31 * tmp20 tmp33 = tmp28 + tmp32 tmp34 = tmp33 - tmp22 tmp36 = tmp34 * tmp35 tmp37 = tmp22 + tmp36 tmp39 = tl.full([XBLOCK], 21, tl.int32) tmp40 = tmp38 + tmp39 tmp41 = tmp38 < 0 tmp42 = tl.where(tmp41, tmp40, tmp38) tmp44 = tmp43 + tmp39 tmp45 = tmp43 < 0 tmp46 = tl.where(tmp45, tmp44, tmp43) tmp47 = tl.load(in_ptr10 + (tmp46 + 21 * tmp42 + 441 * x2), None, eviction_policy='evict_last') tmp48 = tmp47 + tmp11 tmp50 = tmp49 + tmp39 tmp51 = tmp49 < 0 tmp52 = tl.where(tmp51, tmp50, tmp49) tmp53 = tl.load(in_ptr10 + (tmp52 + 21 * tmp42 + 441 * x2), None, eviction_policy='evict_last') tmp54 = tmp53 + tmp11 tmp55 = tmp54 - tmp48 tmp57 = tmp55 * tmp56 tmp58 = tmp48 + tmp57 tmp60 = tmp59 + tmp39 tmp61 = tmp59 < 0 tmp62 = tl.where(tmp61, tmp60, tmp59) tmp63 = tl.load(in_ptr10 + (tmp46 + 21 * tmp62 + 441 * x2), None, eviction_policy='evict_last') tmp64 = tmp63 + tmp11 tmp65 = tl.load(in_ptr10 + (tmp52 + 21 * tmp62 + 441 * x2), None, eviction_policy='evict_last') tmp66 = tmp65 + tmp11 tmp67 = tmp66 - tmp64 tmp68 = tmp67 * tmp56 tmp69 = tmp64 + tmp68 tmp70 = tmp69 - tmp58 tmp72 = tmp70 * tmp71 tmp73 = tmp58 + tmp72 tmp75 = tl.full([XBLOCK], 12, tl.int32) tmp76 = tmp74 + tmp75 tmp77 = tmp74 < 0 tmp78 = tl.where(tmp77, tmp76, tmp74) tmp80 = tmp79 + tmp75 tmp81 = tmp79 < 0 tmp82 = tl.where(tmp81, tmp80, tmp79) tmp83 = tl.load(in_ptr17 + (tmp82 + 12 * tmp78 + 144 * 
x2), None, eviction_policy='evict_last') tmp84 = tmp83 + tmp11 tmp86 = tmp85 + tmp75 tmp87 = tmp85 < 0 tmp88 = tl.where(tmp87, tmp86, tmp85) tmp89 = tl.load(in_ptr17 + (tmp88 + 12 * tmp78 + 144 * x2), None, eviction_policy='evict_last') tmp90 = tmp89 + tmp11 tmp91 = tmp90 - tmp84 tmp93 = tmp91 * tmp92 tmp94 = tmp84 + tmp93 tmp96 = tmp95 + tmp75 tmp97 = tmp95 < 0 tmp98 = tl.where(tmp97, tmp96, tmp95) tmp99 = tl.load(in_ptr17 + (tmp82 + 12 * tmp98 + 144 * x2), None, eviction_policy='evict_last') tmp100 = tmp99 + tmp11 tmp101 = tl.load(in_ptr17 + (tmp88 + 12 * tmp98 + 144 * x2), None, eviction_policy='evict_last') tmp102 = tmp101 + tmp11 tmp103 = tmp102 - tmp100 tmp104 = tmp103 * tmp92 tmp105 = tmp100 + tmp104 tmp106 = tmp105 - tmp94 tmp108 = tmp106 * tmp107 tmp109 = tmp94 + tmp108 tmp111 = tl.full([XBLOCK], 10, tl.int32) tmp112 = tmp110 + tmp111 tmp113 = tmp110 < 0 tmp114 = tl.where(tmp113, tmp112, tmp110) tmp116 = tmp115 + tmp111 tmp117 = tmp115 < 0 tmp118 = tl.where(tmp117, tmp116, tmp115) tmp119 = tl.load(in_ptr24 + (tmp118 + 10 * tmp114 + 100 * x2), None, eviction_policy='evict_last') tmp120 = tmp119 + tmp11 tmp122 = tmp121 + tmp111 tmp123 = tmp121 < 0 tmp124 = tl.where(tmp123, tmp122, tmp121) tmp125 = tl.load(in_ptr24 + (tmp124 + 10 * tmp114 + 100 * x2), None, eviction_policy='evict_last') tmp126 = tmp125 + tmp11 tmp127 = tmp126 - tmp120 tmp129 = tmp127 * tmp128 tmp130 = tmp120 + tmp129 tmp132 = tmp131 + tmp111 tmp133 = tmp131 < 0 tmp134 = tl.where(tmp133, tmp132, tmp131) tmp135 = tl.load(in_ptr24 + (tmp118 + 10 * tmp134 + 100 * x2), None, eviction_policy='evict_last') tmp136 = tmp135 + tmp11 tmp137 = tl.load(in_ptr24 + (tmp124 + 10 * tmp134 + 100 * x2), None, eviction_policy='evict_last') tmp138 = tmp137 + tmp11 tmp139 = tmp138 - tmp136 tmp140 = tmp139 * tmp128 tmp141 = tmp136 + tmp140 tmp142 = tmp141 - tmp130 tmp144 = tmp142 * tmp143 tmp145 = tmp130 + tmp144 tl.store(in_out_ptr0 + x3, tmp37, None) tl.store(in_out_ptr1 + x3, tmp73, None) tl.store(in_out_ptr2 + 
x3, tmp109, None) tl.store(in_out_ptr3 + x3, tmp145, None) @triton.jit def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 8 x0 = xindex % 4096 x2 = xindex // 32768 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x2), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp9, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp14, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr3 + (x0 + 4096 * x2), tmp19, eviction_policy= 'evict_last', other=0.0) tmp21 = tmp0 >= tmp17 tl.full([1], 8, tl.int64) tmp24 = tl.load(in_ptr4 + (x0 + 4096 * (-4 + x1) + 16384 * x2), tmp21, other=0.0) tmp25 = tl.where(tmp19, tmp20, tmp24) tmp26 = tl.where(tmp14, tmp15, tmp25) tmp27 = tl.where(tmp9, tmp10, tmp26) tmp28 = tl.where(tmp4, tmp5, tmp27) tl.store(out_ptr0 + x3, tmp28, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16384)](primals_1, buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, 
stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 32, 32), (1024, 1024, 32, 1)) buf2 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_2[grid(64)](buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_1[grid(64)](buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_2[grid(64)](buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(64)](buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 21, 21), (1792, 441, 21, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_4[grid(7056)](primals_1, buf11, 7056, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 1, 21, 21), (441, 441, 21, 1)) buf13 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_5[grid(64)](buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_6[grid(64)](buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_5[grid(64)](buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_6[grid(64)](buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((64,), (1,), torch.float32) 
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(64)](buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(64)](buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) buf22 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch .float32) triton_poi_fused_max_pool2d_with_indices_8[grid(2304)](primals_1, buf22, 2304, XBLOCK=128, num_warps=4, num_stages=1) buf23 = extern_kernels.convolution(buf22, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 1, 12, 12), (144, 144, 12, 1)) buf24 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_9[grid(64)](buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_10[grid(64)](buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_9[grid(64)](buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_10[grid(64)](buf27, 64, XBLOCK=64, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11[grid(64)](buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11[grid(64)](buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) buf33 = torch.ops.aten.max_pool2d_with_indices.default(primals_1, [ 6, 6], [6, 6]) buf34 = buf33[0] del buf33 buf36 = extern_kernels.convolution(buf34, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 1, 10, 10), (100, 100, 10, 1)) buf37 = empty_strided_cuda((64, 1), (1, 1), 
torch.int64) triton_poi_fused__to_copy_12[grid(64)](buf37, 64, XBLOCK=64, num_warps=1, num_stages=1) buf38 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_13[grid(64)](buf38, 64, XBLOCK=64, num_warps=1, num_stages=1) buf39 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_12[grid(64)](buf39, 64, XBLOCK=64, num_warps=1, num_stages=1) buf40 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_13[grid(64)](buf40, 64, XBLOCK=64, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(64)](buf41, 64, XBLOCK=64, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(64)](buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(64)](buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf10 = reinterpret_tensor(buf9, (4, 1, 64, 64), (4096, 4096, 64, 1), 0 ) del buf9 buf20 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf21 = reinterpret_tensor(buf20, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf20 buf31 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf32 = reinterpret_tensor(buf31, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf31 buf44 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf45 = reinterpret_tensor(buf44, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf44 triton_poi_fused__unsafe_index_add_convolution_mul_sub_15[grid(16384)]( buf10, buf21, buf32, buf45, buf2, buf4, buf1, primals_3, buf5, buf6, buf3, buf8, buf13, buf15, buf12, buf16, buf17, buf14, buf19, buf24, buf26, buf23, buf27, buf28, buf25, buf30, buf37, buf39, buf36, buf40, buf41, buf38, buf43, 16384, 
XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf12 del buf23 del buf36 del primals_3 buf46 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.float32) triton_poi_fused_cat_16[grid(131072)](buf10, buf21, buf32, buf45, primals_1, buf46, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_1 return (buf46, buf45, buf32, buf21, buf10, primals_2, buf0, buf2, buf3, buf4, buf5, buf6, buf8, buf11, buf13, buf14, buf15, buf16, buf17, buf19, buf22, buf24, buf25, buf26, buf27, buf28, buf30, buf34, buf37, buf38, buf39, buf40, buf41, buf43) class SPPblockNew(nn.Module): def __init__(self, in_channels): super(SPPblockNew, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2) self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3) self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5) self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6) self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
zxg3017/CUSE-Net
SPPblock
false
13,207
[ "MIT" ]
0
ea1d07027f89130a8a40465de94528f23eb9f5d1
https://github.com/zxg3017/CUSE-Net/tree/ea1d07027f89130a8a40465de94528f23eb9f5d1
SoftMaxAvgPoolModel
import torch
import torch.cuda
import torch.nn
import torch.utils.data
import torch.fx
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2


class SoftMaxAvgPoolModel(torch.nn.Module):
    """Channel-wise softmax followed by 3x3 average pooling.

    The softmax normalizes over dim 1 (channels); the pooled result is
    returned as-is.
    """

    def __init__(self):
        super(SoftMaxAvgPoolModel, self).__init__()
        # Normalize across the channel dimension, then downsample spatially.
        self.sfmax = torch.nn.Softmax(dim=1)
        self.avgpool = torch.nn.AvgPool2d(3)

    def forward(self, inp):
        # Pipeline the two submodules: softmax first, pooling second.
        return self.avgpool(self.sfmax(inp))


def get_inputs():
    """One random 4D activation tensor (N=4, C=4, H=4, W=4)."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """The constructor takes no positional or keyword arguments."""
    return [[], {}]
# Inductor-generated CUDA implementation of SoftMaxAvgPoolModel:
# softmax over dim=1 of a (4, 4, 4, 4) input, then 3x3 average pooling.
# The input is assumed contiguous with strides (64, 16, 4, 1), so for a flat
# index i: channel stride is 16 and batch stride is 64.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.cuda
import torch.nn
import torch.utils.data
import torch.fx
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Softmax pass 1: store exp(x - max over the 4 channels) per element."""
    xnumel = 256  # 4*4*4*4 total elements
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16  # spatial offset within one channel plane (4*4)
    x2 = xindex // 64  # batch index (64 elements per sample)
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # Load the same spatial position in all 4 channels (stride 16 apart).
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    # Running max over the 4 channel values for numerical stability.
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Softmax pass 2: divide each exp value by the sum over the 4 channels."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # Same spatial position across the 4 channels, as in pass 1.
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    # Denominator: sum of the exponentials over the channel dimension.
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_poi_fused__softmax_avg_pool2d_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """3x3 average pool of each 4x4 plane: mean of the top-left 3x3 window."""
    xnumel = 16  # one output element per (batch, channel) pair: 4*4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Offsets 0..2, 4..6, 8..10 are rows 0-2, cols 0-2 of the 4x4 plane.
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    # Accumulate the 9 window values, then scale by 1/9.
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp17 = 0.1111111111111111  # 1/9
    tmp18 = tmp16 * tmp17
    tl.store(out_ptr0 + x0, tmp18, xmask)


def call(args):
    """Run the fused softmax + avgpool pipeline on one (4,4,4,4) CUDA tensor.

    Takes a single-element arg list, consumes it (args is cleared), and
    returns a 1-tuple holding the (4, 4, 1, 1) pooled result.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Pass 1: stabilized exponentials.
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Pass 2: normalize by the per-position channel sum.
        triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        # Pass 3: 3x3 average pooling down to a single value per plane.
        triton_poi_fused__softmax_avg_pool2d_2[grid(16)](buf1, buf2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf1
    return buf2,


class SoftMaxAvgPoolModelNew(torch.nn.Module):
    """Drop-in replacement for SoftMaxAvgPoolModel backed by the Triton kernels."""

    def __init__(self):
        super(SoftMaxAvgPoolModelNew, self).__init__()
        # Kept for interface parity with the original module; the forward
        # path below goes through call() instead of these submodules.
        self.sfmax = torch.nn.Softmax(dim=1)
        self.avgpool = torch.nn.AvgPool2d(3)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
quic-kyunggeu/aimet
SoftMaxAvgPoolModel
false
13,208
[ "BSD-3-Clause" ]
0
877835d5aafcef17cf12864124977d3c128d4aca
https://github.com/quic-kyunggeu/aimet/tree/877835d5aafcef17cf12864124977d3c128d4aca
MIRB2
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn


class ConvBlock(nn.Module):
    """Weight-normalized grouped 1x1 -> depthwise 3x3 -> pointwise 1x1 convs."""

    def __init__(self, in_channels, out_channels, groups=3):
        super(ConvBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.groups = groups

        def weight_normed(module):
            return torch.nn.utils.weight_norm(module)
        self.group_conv = weight_normed(nn.Conv2d(self.in_channels, self.
            in_channels, 1, groups=self.groups))
        self.depth_conv = weight_normed(nn.Conv2d(self.in_channels, self.
            in_channels, 3, padding=1, groups=in_channels))
        self.point_conv = weight_normed(nn.Conv2d(self.in_channels, self.
            out_channels, 1, groups=1))

    def forward(self, x):
        # Apply the three stages in sequence: grouped -> depthwise -> pointwise.
        return self.point_conv(self.depth_conv(self.group_conv(x)))


class ConvBlockD(nn.Module):
    """Like ConvBlock, but the depthwise 3x3 conv is dilated by ker_size."""

    def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
        super(ConvBlockD, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.groups = groups

        def weight_normed(module):
            return torch.nn.utils.weight_norm(module)
        self.group_conv = weight_normed(nn.Conv2d(self.in_channels, self.
            in_channels, 1, groups=self.groups))
        # padding == dilation keeps the spatial size unchanged for a 3x3 kernel.
        self.depth_conv = weight_normed(nn.Conv2d(self.in_channels, self.
            in_channels, 3, padding=ker_size, dilation=ker_size, groups=
            in_channels))
        self.point_conv = weight_normed(nn.Conv2d(self.in_channels, self.
            out_channels, 1, groups=1))

    def forward(self, x):
        # Same three-stage pipeline as ConvBlock, with the dilated middle conv.
        return self.point_conv(self.depth_conv(self.group_conv(x)))


class MIRB2(nn.Module):
    """Multi-scale residual block: three stages of parallel plain/dilated
    branches whose halves are concatenated back to full width, followed by a
    1x1 fusion conv and an identity residual connection.
    """

    def __init__(self, args):
        super(MIRB2, self).__init__()
        self.c_out = args.n_feats // 2

        def weight_normed(module):
            return torch.nn.utils.weight_norm(module)
        self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
        self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
        self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
        self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
        self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
        self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
        self.conv_last = weight_normed(nn.Conv2d(args.n_feats, args.n_feats, 1)
            )
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        act = self.lrelu
        feat = x
        # Each stage feeds both branches the same features and concatenates
        # the two half-width activated outputs along the channel dim.
        for plain, dilated in ((self.conv3_1, self.convd_1), (self.conv3_2,
            self.convd_2), (self.conv3_3, self.convd_3)):
            feat = torch.cat([act(plain(feat)), act(dilated(feat))], 1)
        return self.conv_last(feat) + x


def get_inputs():
    """Return a list of sample forward-pass inputs."""
    return [torch.rand([4, 18, 64, 64])]


def get_init_inputs():
    """Return the (args, kwargs) pair used to construct the module."""
    return [[], {'args': _mock_config(n_feats=18)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.sqrt(tmp16) tl.store(out_ptr0 + x0, tmp17, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 108 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 6 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, 
tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 18 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 18 rnumel = 9 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask) @triton.jit def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 9 rnumel = 18 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() 
tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 18 x0 = xindex % 4096 x2 = xindex // 73728 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 9, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 36864 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tl.full([1], 18, tl.int64) tmp18 = tl.load(in_ptr2 + (x0 + 4096 * (-9 + x1) + 36864 * x2), tmp15, other=0.0) tmp19 = tl.load(in_ptr3 + (-9 + x1), tmp15, eviction_policy= 'evict_last', other=0.0) tmp20 = tmp18 + tmp19 tmp21 = tmp20 > tmp8 tmp22 = tmp20 * tmp10 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp15, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp14, tmp25) tl.store(out_ptr0 + x3, tmp26, None) @triton.jit def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 18 rnumel = 18 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = 
libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 18 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 9 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58 ) = args args.clear() assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1)) 
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_4, (18,), (1,)) assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_7, (18,), (1,)) assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_10, (9,), (1,)) assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_13, (18,), (1,)) assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_16, (18,), (1,)) assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_19, (9,), (1,)) assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_22, (18,), (1,)) assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_25, (18,), (1,)) assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_28, (9,), (1,)) assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_31, (18,), (1,)) assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_34, (18,), (1,)) assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_37, (9,), (1,)) assert_size_stride(primals_38, (18, 1, 1, 
1), (1, 1, 1, 1)) assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_40, (18,), (1,)) assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_43, (18,), (1,)) assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_46, (9,), (1,)) assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_49, (18,), (1,)) assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_52, (18,), (1,)) assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_55, (9,), (1,)) assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_58, (18,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_3, buf0, 18, XBLOCK=32, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_3, primals_2, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_2[grid(294912)](buf3, primals_4, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_4 buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), 
torch.float32 ) buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf4 buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6, primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf9 buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf10, primals_9, primals_8, buf11, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_12, buf13, 18, XBLOCK=32, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_12, primals_11, buf13, buf14, 108, XBLOCK=128, num_warps=4, num_stages=1) buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf16 = buf15 del buf15 triton_poi_fused_convolution_2[grid(294912)](buf16, primals_13, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf17 = 
empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf17 buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf18, primals_15, primals_14, buf19, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_2[grid(294912)](buf21, primals_16, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_16 buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf22 buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf23, primals_18, primals_17, buf24, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf12, primals_10, buf25, primals_19, buf26, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_21, buf27, 18, XBLOCK=32, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_21, primals_20, buf27, buf28, 108, XBLOCK=128, num_warps=4, num_stages=1) buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf30 = buf29 del buf29 triton_poi_fused_convolution_2[grid(294912)](buf30, primals_22, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_22 buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf31 buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf32, primals_24, primals_23, buf33, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf35 = buf34 del buf34 triton_poi_fused_convolution_2[grid(294912)](buf35, primals_25, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_25 buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf36 buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf37, primals_27, primals_26, buf38, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_30, buf40, 18, XBLOCK=32, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_30, primals_29, buf40, buf41, 108, XBLOCK=128, num_warps=4, num_stages=1) buf42 = 
extern_kernels.convolution(buf26, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_2[grid(294912)](buf43, primals_31, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_31 buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf44 buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf45, primals_33, primals_32, buf46, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf48 = buf47 del buf47 triton_poi_fused_convolution_2[grid(294912)](buf48, primals_34, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_34 buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf49 buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf50, primals_36, primals_35, buf51, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf39, primals_28, buf52, primals_37, buf53, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_39, buf54, 18, XBLOCK=32, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_39, primals_38, buf54, buf55, 108, XBLOCK=128, num_warps=4, num_stages=1) buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf57 = buf56 del buf56 triton_poi_fused_convolution_2[grid(294912)](buf57, primals_40, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_40 buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf58 buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf59, primals_42, primals_41, buf60, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf62 = buf61 del buf61 triton_poi_fused_convolution_2[grid(294912)](buf62, primals_43, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_43 buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf63 buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf64, primals_45, primals_44, buf65, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1)) 
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_48, buf67, 18, XBLOCK=32, num_warps=1, num_stages=1) buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_48, primals_47, buf67, buf68, 108, XBLOCK=128, num_warps=4, num_stages=1) buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf70 = buf69 del buf69 triton_poi_fused_convolution_2[grid(294912)](buf70, primals_49, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_49 buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf71 buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf72, primals_51, primals_50, buf73, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf75 = buf74 del buf74 triton_poi_fused_convolution_2[grid(294912)](buf75, primals_52, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_52 buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf76 buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf77, primals_54, primals_53, buf78, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, 
bias=None) assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf66, primals_46, buf79, primals_55, buf80, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf81 buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_6[grid(18)](buf82, primals_57, primals_56, buf83, 18, 18, XBLOCK=32, num_warps=8, num_stages=1) buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf85 = buf84 del buf84 triton_poi_fused_add_convolution_7[grid(294912)](buf85, primals_58, primals_1, 294912, XBLOCK=1024, num_warps=4, num_stages=1) del primals_58 buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf79, primals_55, buf86, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf79 del primals_55 buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf66, primals_46, buf87, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf66 del primals_46 buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf52, primals_37, buf88, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf52 del primals_37 buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf39, primals_28, buf89, 147456, XBLOCK=1024, 
num_warps=4, num_stages=1) del buf39 del primals_28 buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf25, primals_19, buf90, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf25 del primals_19 buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf12, primals_10, buf91, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf12 del primals_10 return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33, buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73, buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3, buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77, buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91) class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, groups=3): super(ConvBlock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=1, 
groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class ConvBlockD(nn.Module): def __init__(self, in_channels, out_channels, groups=3, ker_size=2): super(ConvBlockD, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=ker_size, dilation=ker_size, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class MIRB2New(nn.Module): def __init__(self, args): super(MIRB2New, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_4 = self.conv3_1.group_conv.bias primals_2 = self.conv3_1.group_conv.weight_g primals_3 = self.conv3_1.group_conv.weight_v primals_7 = self.conv3_1.depth_conv.bias primals_5 = self.conv3_1.depth_conv.weight_g primals_6 = self.conv3_1.depth_conv.weight_v primals_10 = self.conv3_1.point_conv.bias primals_8 = self.conv3_1.point_conv.weight_g primals_9 = self.conv3_1.point_conv.weight_v primals_13 = self.convd_1.group_conv.bias primals_11 = 
self.convd_1.group_conv.weight_g primals_12 = self.convd_1.group_conv.weight_v primals_16 = self.convd_1.depth_conv.bias primals_14 = self.convd_1.depth_conv.weight_g primals_15 = self.convd_1.depth_conv.weight_v primals_19 = self.convd_1.point_conv.bias primals_17 = self.convd_1.point_conv.weight_g primals_18 = self.convd_1.point_conv.weight_v primals_22 = self.conv3_2.group_conv.bias primals_20 = self.conv3_2.group_conv.weight_g primals_21 = self.conv3_2.group_conv.weight_v primals_25 = self.conv3_2.depth_conv.bias primals_23 = self.conv3_2.depth_conv.weight_g primals_24 = self.conv3_2.depth_conv.weight_v primals_28 = self.conv3_2.point_conv.bias primals_26 = self.conv3_2.point_conv.weight_g primals_27 = self.conv3_2.point_conv.weight_v primals_31 = self.convd_2.group_conv.bias primals_29 = self.convd_2.group_conv.weight_g primals_30 = self.convd_2.group_conv.weight_v primals_34 = self.convd_2.depth_conv.bias primals_32 = self.convd_2.depth_conv.weight_g primals_33 = self.convd_2.depth_conv.weight_v primals_37 = self.convd_2.point_conv.bias primals_35 = self.convd_2.point_conv.weight_g primals_36 = self.convd_2.point_conv.weight_v primals_40 = self.conv3_3.group_conv.bias primals_38 = self.conv3_3.group_conv.weight_g primals_39 = self.conv3_3.group_conv.weight_v primals_43 = self.conv3_3.depth_conv.bias primals_41 = self.conv3_3.depth_conv.weight_g primals_42 = self.conv3_3.depth_conv.weight_v primals_46 = self.conv3_3.point_conv.bias primals_44 = self.conv3_3.point_conv.weight_g primals_45 = self.conv3_3.point_conv.weight_v primals_49 = self.convd_3.group_conv.bias primals_47 = self.convd_3.group_conv.weight_g primals_48 = self.convd_3.group_conv.weight_v primals_52 = self.convd_3.depth_conv.bias primals_50 = self.convd_3.depth_conv.weight_g primals_51 = self.convd_3.depth_conv.weight_v primals_55 = self.convd_3.point_conv.bias primals_53 = self.convd_3.point_conv.weight_g primals_54 = self.convd_3.point_conv.weight_v primals_58 = self.conv_last.bias primals_56 
= self.conv_last.weight_g primals_57 = self.conv_last.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58]) return output[0]
wwjfsfs/wwjyyds
MIRB2
false
13,209
[ "MIT" ]
0
80cd6267fde7cd98838078a0d5178a557ceb7414
https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414
MIRB1
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, groups=3): super(ConvBlock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=1, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class MIRB1(nn.Module): def __init__(self, args): super(MIRB1, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlock(args.n_feats, self.c_out) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlock(args.n_feats, self.c_out) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlock(args.n_feats, self.c_out) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): res = x c1_1 = self.lrelu(self.conv3_1(res)) c2_1 = self.lrelu(self.convd_1(res)) c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1))) c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1))) c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1))) c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1))) out = self.conv_last(torch.cat([c1_4, c2_4], 1)) out = out + x return out def get_inputs(): return [torch.rand([4, 18, 64, 64])] def get_init_inputs(): return [[], {'args': _mock_config(n_feats=18)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.sqrt(tmp16) tl.store(out_ptr0 + x0, tmp17, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 108 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 6 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, 
tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 18 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 18 rnumel = 9 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask) @triton.jit def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 9 rnumel = 18 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() 
tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 18 x0 = xindex % 4096 x2 = xindex // 73728 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 9, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 36864 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tl.full([1], 18, tl.int64) tmp18 = tl.load(in_ptr2 + (x0 + 4096 * (-9 + x1) + 36864 * x2), tmp15, other=0.0) tmp19 = tl.load(in_ptr3 + (-9 + x1), tmp15, eviction_policy= 'evict_last', other=0.0) tmp20 = tmp18 + tmp19 tmp21 = tmp20 > tmp8 tmp22 = tmp20 * tmp10 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp15, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp14, tmp25) tl.store(out_ptr0 + x3, tmp26, None) @triton.jit def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 18 rnumel = 18 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = 
libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 18 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 9 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58 ) = args args.clear() assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1)) 
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_4, (18,), (1,)) assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_7, (18,), (1,)) assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_10, (9,), (1,)) assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_13, (18,), (1,)) assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_16, (18,), (1,)) assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_19, (9,), (1,)) assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_22, (18,), (1,)) assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_25, (18,), (1,)) assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_28, (9,), (1,)) assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_31, (18,), (1,)) assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_34, (18,), (1,)) assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_37, (9,), (1,)) assert_size_stride(primals_38, (18, 1, 1, 
1), (1, 1, 1, 1)) assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_40, (18,), (1,)) assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_43, (18,), (1,)) assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_46, (9,), (1,)) assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_49, (18,), (1,)) assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_52, (18,), (1,)) assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_55, (9,), (1,)) assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_58, (18,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_3, buf0, 18, XBLOCK=32, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_3, primals_2, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_2[grid(294912)](buf3, primals_4, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_4 buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), 
torch.float32 ) buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf4 buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6, primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf9 buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf10, primals_9, primals_8, buf11, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_12, buf13, 18, XBLOCK=32, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_12, primals_11, buf13, buf14, 108, XBLOCK=128, num_warps=4, num_stages=1) buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf16 = buf15 del buf15 triton_poi_fused_convolution_2[grid(294912)](buf16, primals_13, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf17 = 
empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf17 buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf18, primals_15, primals_14, buf19, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_2[grid(294912)](buf21, primals_16, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_16 buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf22 buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf23, primals_18, primals_17, buf24, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf12, primals_10, buf25, primals_19, buf26, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_21, buf27, 18, XBLOCK=32, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_21, primals_20, buf27, buf28, 108, XBLOCK=128, num_warps=4, num_stages=1) buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf30 = buf29 del buf29 triton_poi_fused_convolution_2[grid(294912)](buf30, primals_22, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_22 buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf31 buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf32, primals_24, primals_23, buf33, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf35 = buf34 del buf34 triton_poi_fused_convolution_2[grid(294912)](buf35, primals_25, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_25 buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf36 buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf37, primals_27, primals_26, buf38, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_30, buf40, 18, XBLOCK=32, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_30, primals_29, buf40, buf41, 108, XBLOCK=128, num_warps=4, num_stages=1) buf42 = 
extern_kernels.convolution(buf26, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_2[grid(294912)](buf43, primals_31, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_31 buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf44 buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf45, primals_33, primals_32, buf46, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf48 = buf47 del buf47 triton_poi_fused_convolution_2[grid(294912)](buf48, primals_34, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_34 buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf49 buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf50, primals_36, primals_35, buf51, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf39, primals_28, buf52, primals_37, buf53, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_39, buf54, 18, XBLOCK=32, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_39, primals_38, buf54, buf55, 108, XBLOCK=128, num_warps=4, num_stages=1) buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf57 = buf56 del buf56 triton_poi_fused_convolution_2[grid(294912)](buf57, primals_40, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_40 buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf58 buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf59, primals_42, primals_41, buf60, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf62 = buf61 del buf61 triton_poi_fused_convolution_2[grid(294912)](buf62, primals_43, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_43 buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf63 buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf64, primals_45, primals_44, buf65, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1)) 
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_48, buf67, 18, XBLOCK=32, num_warps=1, num_stages=1) buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_48, primals_47, buf67, buf68, 108, XBLOCK=128, num_warps=4, num_stages=1) buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf70 = buf69 del buf69 triton_poi_fused_convolution_2[grid(294912)](buf70, primals_49, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_49 buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf71 buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf72, primals_51, primals_50, buf73, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf75 = buf74 del buf74 triton_poi_fused_convolution_2[grid(294912)](buf75, primals_52, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_52 buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf76 buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf77, primals_54, primals_53, buf78, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, 
bias=None) assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf66, primals_46, buf79, primals_55, buf80, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf81 buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_6[grid(18)](buf82, primals_57, primals_56, buf83, 18, 18, XBLOCK=32, num_warps=8, num_stages=1) buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf85 = buf84 del buf84 triton_poi_fused_add_convolution_7[grid(294912)](buf85, primals_58, primals_1, 294912, XBLOCK=1024, num_warps=4, num_stages=1) del primals_58 buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf79, primals_55, buf86, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf79 del primals_55 buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf66, primals_46, buf87, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf66 del primals_46 buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf52, primals_37, buf88, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf52 del primals_37 buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf39, primals_28, buf89, 147456, XBLOCK=1024, 
num_warps=4, num_stages=1) del buf39 del primals_28 buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf25, primals_19, buf90, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf25 del primals_19 buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid( 147456)](buf12, primals_10, buf91, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf12 del primals_10 return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33, buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73, buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3, buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77, buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91) class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, groups=3): super(ConvBlock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=1, 
groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class MIRB1New(nn.Module): def __init__(self, args): super(MIRB1New, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlock(args.n_feats, self.c_out) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlock(args.n_feats, self.c_out) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlock(args.n_feats, self.c_out) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_4 = self.conv3_1.group_conv.bias primals_2 = self.conv3_1.group_conv.weight_g primals_3 = self.conv3_1.group_conv.weight_v primals_7 = self.conv3_1.depth_conv.bias primals_5 = self.conv3_1.depth_conv.weight_g primals_6 = self.conv3_1.depth_conv.weight_v primals_10 = self.conv3_1.point_conv.bias primals_8 = self.conv3_1.point_conv.weight_g primals_9 = self.conv3_1.point_conv.weight_v primals_13 = self.convd_1.group_conv.bias primals_11 = self.convd_1.group_conv.weight_g primals_12 = self.convd_1.group_conv.weight_v primals_16 = self.convd_1.depth_conv.bias primals_14 = self.convd_1.depth_conv.weight_g primals_15 = self.convd_1.depth_conv.weight_v primals_19 = self.convd_1.point_conv.bias primals_17 = self.convd_1.point_conv.weight_g primals_18 = self.convd_1.point_conv.weight_v primals_22 = self.conv3_2.group_conv.bias primals_20 = self.conv3_2.group_conv.weight_g primals_21 = self.conv3_2.group_conv.weight_v primals_25 = self.conv3_2.depth_conv.bias primals_23 = self.conv3_2.depth_conv.weight_g primals_24 = self.conv3_2.depth_conv.weight_v primals_28 = self.conv3_2.point_conv.bias primals_26 = self.conv3_2.point_conv.weight_g primals_27 = 
self.conv3_2.point_conv.weight_v primals_31 = self.convd_2.group_conv.bias primals_29 = self.convd_2.group_conv.weight_g primals_30 = self.convd_2.group_conv.weight_v primals_34 = self.convd_2.depth_conv.bias primals_32 = self.convd_2.depth_conv.weight_g primals_33 = self.convd_2.depth_conv.weight_v primals_37 = self.convd_2.point_conv.bias primals_35 = self.convd_2.point_conv.weight_g primals_36 = self.convd_2.point_conv.weight_v primals_40 = self.conv3_3.group_conv.bias primals_38 = self.conv3_3.group_conv.weight_g primals_39 = self.conv3_3.group_conv.weight_v primals_43 = self.conv3_3.depth_conv.bias primals_41 = self.conv3_3.depth_conv.weight_g primals_42 = self.conv3_3.depth_conv.weight_v primals_46 = self.conv3_3.point_conv.bias primals_44 = self.conv3_3.point_conv.weight_g primals_45 = self.conv3_3.point_conv.weight_v primals_49 = self.convd_3.group_conv.bias primals_47 = self.convd_3.group_conv.weight_g primals_48 = self.convd_3.group_conv.weight_v primals_52 = self.convd_3.depth_conv.bias primals_50 = self.convd_3.depth_conv.weight_g primals_51 = self.convd_3.depth_conv.weight_v primals_55 = self.convd_3.point_conv.bias primals_53 = self.convd_3.point_conv.weight_g primals_54 = self.convd_3.point_conv.weight_v primals_58 = self.conv_last.bias primals_56 = self.conv_last.weight_g primals_57 = self.conv_last.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, 
primals_53, primals_54, primals_55, primals_56, primals_57, primals_58]) return output[0]
wwjfsfs/wwjyyds
MIRB1
false
13,210
[ "MIT" ]
0
80cd6267fde7cd98838078a0d5178a557ceb7414
https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414
BertLayer
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn import torch.nn.functional as F class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transform(self, x, linear_layer): bs, seq_len = x.shape[:2] proj = linear_layer(x) proj = proj.view(bs, seq_len, self.num_attention_heads, self. attention_head_size) proj = proj.transpose(1, 2) return proj def attention(self, key, query, value, attention_mask): att_scores = query @ key.transpose(-2, -1) / math.sqrt(self. attention_head_size) att_scores.masked_fill_(attention_mask == -10000.0, value=-10000.0) att_scores = F.softmax(att_scores, dim=-1) att_scores = self.dropout(att_scores) return att_scores @ value def forward(self, hidden_states, attention_mask): key_layer = self.transform(hidden_states, self.key) value_layer = self.transform(hidden_states, self.value) query_layer = self.transform(hidden_states, self.query) attn_value = self.attention(key_layer, query_layer, value_layer, attention_mask) return attn_value class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.self_attention = BertSelfAttention(config) self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size ) self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps= config.layer_norm_eps) self.attention_dropout = nn.Dropout(config.hidden_dropout_prob) self.interm_dense = nn.Linear(config.hidden_size, config. 
intermediate_size) self.interm_af = F.gelu self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size ) self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.out_dropout = nn.Dropout(config.hidden_dropout_prob) def add_norm(self, input, output, dense_layer, dropout, ln_layer): """ input: the input output: the input that requires the sublayer to transform dense_layer, dropput: the sublayer ln_layer: layer norm that takes input+sublayer(output) """ sublayer = dropout(dense_layer(output)) return ln_layer(input + sublayer) def forward(self, hidden_states, attention_mask): attn_values = self.self_attention(hidden_states, attention_mask) bs = hidden_states.size(0) attn_values = attn_values.transpose(1, 2).contiguous().view(bs, -1, self.self_attention.all_head_size) hidden_states = self.add_norm(hidden_states, attn_values, self. attention_dense, self.attention_dropout, self.attention_layer_norm) interim_hidden_states = self.interm_af(self.interm_dense(hidden_states) ) hidden_states = self.add_norm(hidden_states, interim_hidden_states, self.out_dense, self.out_dropout, self.out_layer_norm) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(num_attention_heads=4, hidden_size= 4, attention_probs_dropout_prob=0.5, layer_norm_eps=1, hidden_dropout_prob=0.5, intermediate_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -10000.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 
* x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -10000.0 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x4 = xindex x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_out_ptr0 + x4, xmask) tmp6 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -10000.0 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(in_out_ptr0 + x4, tmp10, xmask) 
@triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def 
triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 
+ 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) 
assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf2 triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(64)](primals_8, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf7 = 
reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf9, buf6, buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf10, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17) del 
primals_14 buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_gelu_7[grid(64)](buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22, primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_18 return (buf23, primals_1, primals_11, primals_17, buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf20, primals_15, primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. 
attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transform(self, x, linear_layer): bs, seq_len = x.shape[:2] proj = linear_layer(x) proj = proj.view(bs, seq_len, self.num_attention_heads, self. attention_head_size) proj = proj.transpose(1, 2) return proj def attention(self, key, query, value, attention_mask): att_scores = query @ key.transpose(-2, -1) / math.sqrt(self. attention_head_size) att_scores.masked_fill_(attention_mask == -10000.0, value=-10000.0) att_scores = F.softmax(att_scores, dim=-1) att_scores = self.dropout(att_scores) return att_scores @ value def forward(self, hidden_states, attention_mask): key_layer = self.transform(hidden_states, self.key) value_layer = self.transform(hidden_states, self.value) query_layer = self.transform(hidden_states, self.query) attn_value = self.attention(key_layer, query_layer, value_layer, attention_mask) return attn_value class BertLayerNew(nn.Module): def __init__(self, config): super().__init__() self.self_attention = BertSelfAttention(config) self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size ) self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps= config.layer_norm_eps) self.attention_dropout = nn.Dropout(config.hidden_dropout_prob) self.interm_dense = nn.Linear(config.hidden_size, config. intermediate_size) self.interm_af = F.gelu self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size ) self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config. 
layer_norm_eps) self.out_dropout = nn.Dropout(config.hidden_dropout_prob) def add_norm(self, input, output, dense_layer, dropout, ln_layer): """ input: the input output: the input that requires the sublayer to transform dense_layer, dropput: the sublayer ln_layer: layer norm that takes input+sublayer(output) """ sublayer = dropout(dense_layer(output)) return ln_layer(input + sublayer) def forward(self, input_0, input_1): primals_2 = self.self_attention.query.weight primals_3 = self.self_attention.query.bias primals_4 = self.self_attention.key.weight primals_5 = self.self_attention.key.bias primals_6 = self.self_attention.value.weight primals_7 = self.self_attention.value.bias primals_9 = self.attention_dense.weight primals_10 = self.attention_dense.bias primals_11 = self.attention_layer_norm.weight primals_12 = self.attention_layer_norm.bias primals_13 = self.interm_dense.weight primals_14 = self.interm_dense.bias primals_15 = self.out_dense.weight primals_16 = self.out_dense.bias primals_17 = self.out_layer_norm.weight primals_18 = self.out_layer_norm.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
Abhimanyu08/minbert-assignment
BertLayer
false
13,211
[ "Apache-2.0" ]
0
1610364213b1aab2d5446175dffabd7e1742833b
https://github.com/Abhimanyu08/minbert-assignment/tree/1610364213b1aab2d5446175dffabd7e1742833b
BertOutput
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.utils.checkpoint class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) 
tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) 
triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1 class BertOutputNew(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Clemens123/transformers
BertOutput
false
13,212
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
DeiTSelfAttention
from _paritybench_helpers import _mock_config import math import torch from torch import nn import torch.utils.checkpoint class DeiTSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size {config.hidden_size,} is not a multiple of the number of attention heads {config.num_attention_heads}.' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, head_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 
tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = 
tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf9 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class DeiTSelfAttentionNew(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size {config.hidden_size,} is not a multiple of the number of 
attention heads {config.num_attention_heads}.' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Clemens123/transformers
DeiTSelfAttention
false
13,213
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
IBertClassificationHead
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.utils.checkpoint class IBertClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): hidden_states = features[:, 0, :] hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5, num_labels=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), 
(4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4 class IBertClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Clemens123/transformers
IBertClassificationHead
false
13,214
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
PropMaxPool
from _paritybench_helpers import _mock_config import torch import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn class PropMaxPool(nn.Module): def __init__(self, cfg): super(PropMaxPool, self).__init__() num_layers = cfg.NUM_LAYERS self.layers = nn.ModuleList([nn.Identity()] + [nn.MaxPool1d(2, stride=1) for _ in range(num_layers - 1)]) self.num_layers = num_layers def forward(self, x): batch_size, hidden_size, num_clips = x.shape map_h = x.new_zeros(batch_size, hidden_size, num_clips, num_clips) map_mask = x.new_zeros(batch_size, 1, num_clips, num_clips) for dig_idx, pool in enumerate(self.layers): x = pool(x) start_idxs = [s_idx for s_idx in range(0, num_clips - dig_idx, 1)] end_idxs = [(s_idx + dig_idx) for s_idx in start_idxs] map_h[:, :, start_idxs, end_idxs] = x map_mask[:, :, start_idxs, end_idxs] += 1 return map_h, map_mask def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(NUM_LAYERS=4)}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_index_put_new_zeros_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp11 = tl.load(in_ptr0 + x2, xmask) tmp0 = x0 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 0, tl.int64) tmp6 = tl.where(tmp4, tmp5, tmp3) tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.where(tmp8, tmp1, tmp7) tmp10 = tl.where(tmp2, tmp6, tmp9) tl.store(out_ptr0 + (5 * tmp10 + 16 * x1), tmp11, xmask) @triton.jit def triton_poi_fused_index_put_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tl.full([1], 2, tl.int64) tmp7 = tmp3 < tmp6 tmp8 = tl.where(tmp7, tmp4, tmp6) tmp9 = tl.full([1], 0, tl.int64) tmp10 = 
tl.where(tmp5, tmp9, tmp8) tmp11 = tl.full([1], 3, tl.int64) tmp12 = tl.where(tmp7, tmp6, tmp11) tmp13 = tl.where(tmp5, tmp4, tmp12) tl.store(out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr1 + (tmp13 + 4 * tmp10 + 16 * x1), tmp2, xmask) @triton.jit def triton_poi_fused_index_put_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 3 * x1), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 3 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tl.full([1], 0, tl.int64) tmp7 = tl.where(tmp5, tmp6, tmp4) tmp8 = tl.full([1], 2, tl.int64) tmp9 = tl.full([1], 3, tl.int64) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr1 + (tmp10 + 4 * tmp7 + 16 * x1), tmp2, xmask) @triton.jit def triton_poi_fused_index_put_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (3 + 16 * x0), tmp2, xmask) @triton.jit def triton_poi_fused_new_zeros_5(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_index_index_put_new_zeros_6(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 tmp0 = x0 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 0, tl.int64) tmp6 = tl.where(tmp4, tmp5, tmp3) tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.where(tmp8, tmp1, tmp7) tmp10 = tl.where(tmp2, tmp6, tmp9) tmp11 = 1.0 tl.store(out_ptr0 + (5 * tmp10 + 16 * x1), tmp11, xmask) @triton.jit def triton_poi_fused_add_index_index_put_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 tmp0 = x0 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.where(tmp4, tmp1, tmp3) tmp6 = tl.full([1], 0, tl.int64) tmp7 = tl.where(tmp2, tmp6, tmp5) tmp8 = tl.full([1], 3, tl.int64) tmp9 = tl.where(tmp4, tmp3, tmp8) tmp10 = tl.where(tmp2, tmp1, tmp9) tmp11 = tl.load(in_ptr0 + (tmp10 + 4 * tmp7 + 16 * x1), xmask, eviction_policy='evict_last') tmp12 = 1.0 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (tmp10 + 4 * tmp7 + 
16 * x1), tmp13, xmask) @triton.jit def triton_poi_fused_add_index_index_put_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 tmp0 = x0 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 0, tl.int64) tmp4 = tl.where(tmp2, tmp3, tmp1) tmp5 = tl.full([1], 2, tl.int64) tmp6 = tl.full([1], 3, tl.int64) tmp7 = tl.where(tmp2, tmp5, tmp6) tmp8 = tl.load(in_ptr0 + (tmp7 + 4 * tmp4 + 16 * x1), xmask, eviction_policy='evict_last') tmp9 = 1.0 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (tmp7 + 4 * tmp4 + 16 * x1), tmp10, xmask) @triton.jit def triton_poi_fused_add_index_index_put_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (3 + 16 * x0), tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(256)](buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) triton_poi_fused_index_put_new_zeros_1[grid(64)](arg0_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf0 = empty_strided_cuda((4, 4, 1, 3), (12, 3, 48, 1), torch.float32) triton_poi_fused_index_put_max_pool2d_with_indices_2[grid(48)](arg0_1, buf0, buf2, 48, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 32, 1), torch.float32) triton_poi_fused_index_put_max_pool2d_with_indices_3[grid(32)](buf0, buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf0 triton_poi_fused_index_put_4[grid(16)](buf1, buf2, 16, XBLOCK=16, 
num_warps=1, num_stages=1) del buf1 buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) triton_poi_fused_new_zeros_5[grid(64)](buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) triton_poi_fused_add_index_index_put_new_zeros_6[grid(16)](buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) triton_poi_fused_add_index_index_put_7[grid(12)](buf7, buf7, 12, XBLOCK=16, num_warps=1, num_stages=1) triton_poi_fused_add_index_index_put_8[grid(8)](buf7, buf7, 8, XBLOCK=8, num_warps=1, num_stages=1) triton_poi_fused_add_index_index_put_9[grid(4)](buf7, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) return buf2, buf7 class PropMaxPoolNew(nn.Module): def __init__(self, cfg): super(PropMaxPoolNew, self).__init__() num_layers = cfg.NUM_LAYERS self.layers = nn.ModuleList([nn.Identity()] + [nn.MaxPool1d(2, stride=1) for _ in range(num_layers - 1)]) self.num_layers = num_layers def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
MicroTensor-ai/episodic-memory
PropMaxPool
false
13,215
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
StructuredAutoencoderNet
from _paritybench_helpers import _mock_config import torch import torch.nn as nn from collections import OrderedDict class StructuredAutoencoderNet(nn.Module): def __init__(self, p, encoder_config, decoder_config, dropout_rate=0): super().__init__() self.p = p self.encoder_config = encoder_config self.decoder_config = decoder_config self.weights_layer = [] index = 0 self.encoder_layer = [] for i in range(len(self.encoder_config['dimension']) - 1): self.encoder_layer.append(('linear' + str(index), nn.Linear(int (self.encoder_config['dimension'][i]), int(self. encoder_config['dimension'][i + 1])))) if i != len(self.encoder_config['dimension']) - 2: self.encoder_layer.append(('Sigmoid' + str(index), nn. Sigmoid())) self.encoder_layer.append(('dropout' + str(index), nn. Dropout(p=dropout_rate))) index += 1 for index, layer in enumerate(self.encoder_layer): if layer[0] == 'linear': self.weights_layer.append(torch.nn.Parameter(layer[1].weight)) self.encoder_layer[index][1].weight = self.weights_layer[-1] index = 0 self.decoder_layer = [] for i in range(len(self.decoder_config['dimension']) - 1): if i != 0: self.decoder_layer.append(('dropout' + str(index), nn. Dropout(p=dropout_rate))) self.decoder_layer.append(('linear' + str(index), nn.Linear(int (self.decoder_config['dimension'][i]), int(self. decoder_config['dimension'][i + 1])))) if i != len(self.decoder_config['dimension']) - 2: self.decoder_layer.append(('Sigmoid' + str(index), nn. 
Sigmoid())) index += 1 self.encoder_net = nn.Sequential(OrderedDict(self.encoder_layer)) self.decoder_net = nn.Sequential(OrderedDict(self.decoder_layer)) def encode(self, X, mask): index = 0 for layer in self.encoder_layer: if layer[0] == 'linear': X = torch.nn.functional.linear(X, self.weights_layer[index]) index += 1 else: X = layer[1](X) X = X * mask return X def decode(self, X): index = len(self.weights_layer) - 1 for layer in self.decoder_layer: if layer[0] == 'linear': X = torch.nn.functional.linear(X, self.weights_layer[index].t() ) index -= 1 else: X = layer[1](X) return X def forward(self, X, mask): X = self.encode(X, mask) X = self.decode(X) return X def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'p': 4, 'encoder_config': _mock_config(dimension=[4, 4]), 'decoder_config': _mock_config(dimension=[4, 4])}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](buf1, primals_2, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_5, (4, 4), 
(1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5 class StructuredAutoencoderNetNew(nn.Module): def __init__(self, p, encoder_config, decoder_config, dropout_rate=0): super().__init__() self.p = p self.encoder_config = encoder_config self.decoder_config = decoder_config self.weights_layer = [] index = 0 self.encoder_layer = [] for i in range(len(self.encoder_config['dimension']) - 1): self.encoder_layer.append(('linear' + str(index), nn.Linear(int (self.encoder_config['dimension'][i]), int(self. encoder_config['dimension'][i + 1])))) if i != len(self.encoder_config['dimension']) - 2: self.encoder_layer.append(('Sigmoid' + str(index), nn. Sigmoid())) self.encoder_layer.append(('dropout' + str(index), nn. Dropout(p=dropout_rate))) index += 1 for index, layer in enumerate(self.encoder_layer): if layer[0] == 'linear': self.weights_layer.append(torch.nn.Parameter(layer[1].weight)) self.encoder_layer[index][1].weight = self.weights_layer[-1] index = 0 self.decoder_layer = [] for i in range(len(self.decoder_config['dimension']) - 1): if i != 0: self.decoder_layer.append(('dropout' + str(index), nn. Dropout(p=dropout_rate))) self.decoder_layer.append(('linear' + str(index), nn.Linear(int (self.decoder_config['dimension'][i]), int(self. decoder_config['dimension'][i + 1])))) if i != len(self.decoder_config['dimension']) - 2: self.decoder_layer.append(('Sigmoid' + str(index), nn. 
Sigmoid())) index += 1 self.encoder_net = nn.Sequential(OrderedDict(self.encoder_layer)) self.decoder_net = nn.Sequential(OrderedDict(self.decoder_layer)) def encode(self, X, mask): index = 0 for layer in self.encoder_layer: if layer[0] == 'linear': X = torch.nn.functional.linear(X, self.weights_layer[index]) index += 1 else: X = layer[1](X) X = X * mask return X def decode(self, X): index = len(self.weights_layer) - 1 for layer in self.decoder_layer: if layer[0] == 'linear': X = torch.nn.functional.linear(X, self.weights_layer[index].t() ) index -= 1 else: X = layer[1](X) return X def forward(self, input_0, input_1): primals_1 = self.encoder_net.linear0.weight primals_2 = self.encoder_net.linear0.bias primals_5 = self.decoder_net.linear0.weight primals_6 = self.decoder_net.linear0.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
CHuanSite/smautoPy
StructuredAutoencoderNet
false
13,216
[ "MIT" ]
0
46c6b2088fc3c488870cee2ab88ac9f1ce779c0d
https://github.com/CHuanSite/smautoPy/tree/46c6b2088fc3c488870cee2ab88ac9f1ce779c0d
LxmertCrossAttentionLayer
from _paritybench_helpers import _mock_config import math import torch from torch import nn import torch.utils.checkpoint class LxmertAttention(nn.Module): def __init__(self, config, ctx_dim=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.head_size = self.num_attention_heads * self.attention_head_size if ctx_dim is None: ctx_dim = config.hidden_size self.query = nn.Linear(config.hidden_size, self.head_size) self.key = nn.Linear(ctx_dim, self.head_size) self.value = nn.Linear(ctx_dim, self.head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, context, attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs class LxmertAttentionOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LxmertCrossAttentionLayer(nn.Module): def __init__(self, config): super().__init__() self.att = LxmertAttention(config) self.output = LxmertAttentionOutput(config) def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False): output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions) if output_attentions: attention_probs = output[1] attention_output = self.output(output[0], input_tensor) outputs = (attention_output, attention_probs ) if output_attentions else (attention_output,) return outputs def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, 
tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = 
yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = 
tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del 
primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 
extern_kernels.addmm(primals_10, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_10 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_3, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_3, buf12, buf13, primals_11, primals_12, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_12 return buf14, primals_3, primals_11, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_9 class LxmertAttention(nn.Module): def __init__(self, config, ctx_dim=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.head_size = self.num_attention_heads * self.attention_head_size if ctx_dim is None: ctx_dim = config.hidden_size self.query = nn.Linear(config.hidden_size, self.head_size) self.key = nn.Linear(ctx_dim, self.head_size) self.value = nn.Linear(ctx_dim, self.head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. 
attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, context, attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs class LxmertAttentionOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LxmertCrossAttentionLayerNew(nn.Module): def __init__(self, config): super().__init__() self.att = LxmertAttention(config) self.output = LxmertAttentionOutput(config) def forward(self, input_0, input_1): primals_1 = self.att.query.weight primals_2 = self.att.query.bias primals_4 = self.att.key.weight primals_5 = self.att.key.bias primals_7 = 
self.att.value.weight primals_8 = self.att.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
Clemens123/transformers
LxmertCrossAttentionLayer
false
13,217
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
MPNetSelfAttention
from _paritybench_helpers import _mock_config import math import torch from torch import nn import torch.utils.checkpoint class MPNetSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.q = nn.Linear(config.hidden_size, self.all_head_size) self.k = nn.Linear(config.hidden_size, self.all_head_size) self.v = nn.Linear(config.hidden_size, self.all_head_size) self.o = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs): q = self.q(hidden_states) k = self.k(hidden_states) v = self.v(hidden_states) q = self.transpose_for_scores(q) k = self.transpose_for_scores(k) v = self.transpose_for_scores(v) attention_scores = torch.matmul(q, k.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) if position_bias is not None: attention_scores += position_bias if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask c = torch.matmul(attention_probs, v) c = c.permute(0, 2, 1, 3).contiguous() new_c_shape = c.size()[:-2] + (self.all_head_size,) c = c.view(*new_c_shape) o = self.o(c) outputs = (o, attention_probs) if output_attentions else (o,) return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 
tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = 
tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del 
primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_9 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), 
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8 class MPNetSelfAttentionNew(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.q = nn.Linear(config.hidden_size, self.all_head_size) self.k = nn.Linear(config.hidden_size, self.all_head_size) self.v = nn.Linear(config.hidden_size, self.all_head_size) self.o = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.q.weight primals_2 = self.q.bias primals_4 = self.k.weight primals_5 = self.k.bias primals_6 = self.v.weight primals_7 = self.v.bias primals_8 = self.o.weight primals_9 = self.o.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Clemens123/transformers
MPNetSelfAttention
false
13,218
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
BertOutAttention
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertOutAttention(nn.Module): def __init__(self, config, ctx_dim=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) if ctx_dim is None: ctx_dim = config.hidden_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(ctx_dim, self.all_head_size) self.value = nn.Linear(ctx_dim, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, context, attention_mask=None): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer, attention_scores def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_div_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x2 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) 
tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_div_1[grid(256)](buf5, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf9, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), 
(16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf10 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), buf6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertOutAttentionNew(nn.Module): def __init__(self, config, ctx_dim=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) if ctx_dim is None: ctx_dim = config.hidden_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(ctx_dim, self.all_head_size) self.value = nn.Linear(ctx_dim, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_7 = self.value.weight primals_8 = self.value.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
MarSaKi/Recurrent-VLN-BERT
BertOutAttention
false
13,219
[ "MIT" ]
0
c1170f9ca48c234a0c3ded19f9273f2fdcd571d6
https://github.com/MarSaKi/Recurrent-VLN-BERT/tree/c1170f9ca48c234a0c3ded19f9273f2fdcd571d6
IBertLMHead
from _paritybench_helpers import _mock_config import math import torch from torch import nn import torch.utils.checkpoint def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class IBertLMHead(nn.Module): """I-BERT Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) x = self.decoder(x) return x def _tie_weights(self): self.bias = self.decoder.bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, vocab_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp36 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tmp15 = tmp14 * tmp1 tmp16 = tmp14 * tmp14 tmp17 = tmp16 * tmp14 tmp18 = tmp17 * tmp5 tmp19 = tmp14 + tmp18 tmp20 = tmp19 * tmp8 tmp21 = libdevice.tanh(tmp20) tmp22 = tmp21 + tmp11 tmp23 = tmp15 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 * tmp1 tmp27 = tmp25 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp5 tmp30 = tmp25 + tmp29 tmp31 = tmp30 * tmp8 tmp32 = libdevice.tanh(tmp31) tmp33 = tmp32 + tmp11 tmp34 = tmp26 * tmp33 tmp35 = tmp24 + tmp34 tmp37 = tmp36 * tmp1 tmp38 = tmp36 * tmp36 tmp39 = tmp38 * tmp36 tmp40 = tmp39 * tmp5 tmp41 = tmp36 + tmp40 tmp42 = tmp41 * tmp8 tmp43 = libdevice.tanh(tmp42) tmp44 = tmp43 + 
tmp11 tmp45 = tmp37 * tmp44 tmp46 = tmp35 + tmp45 tmp47 = 4.0 tmp48 = tmp46 / tmp47 tmp49 = tmp13 - tmp48 tmp50 = tmp49 * tmp49 tmp51 = tmp23 - tmp48 tmp52 = tmp51 * tmp51 tmp53 = tmp50 + tmp52 tmp54 = tmp34 - tmp48 tmp55 = tmp54 * tmp54 tmp56 = tmp53 + tmp55 tmp57 = tmp45 - tmp48 tmp58 = tmp57 * tmp57 tmp59 = tmp56 + tmp58 tmp60 = tmp59 / tmp47 tl.store(out_ptr0 + x0, tmp48, xmask) tl.store(out_ptr1 + x0, tmp60, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tmp15 = tmp13 - tmp14 tmp17 = tmp16 + tmp11 tmp18 = libdevice.rsqrt(tmp17) tmp19 = tmp15 * tmp18 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tl.store(out_ptr0 + x2, tmp23, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 
4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1[grid(256)](buf0, buf1, buf2, primals_4, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf2 del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6 def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class IBertLMHeadNew(nn.Module): """I-BERT Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config. 
layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.bias = self.decoder.bias def forward(self, input_0): primals_2 = self.bias primals_1 = self.dense.weight primals_4 = self.dense.bias primals_5 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_6 = self.decoder.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Clemens123/transformers
IBertLMHead
false
13,220
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
BoundNeg
from _paritybench_helpers import _mock_config import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number from torch.nn import MSELoss def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, 
Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. 
size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 
'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) 
x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = 
batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundNeg(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def forward(self, x): return -x def bound_backward(self, last_lA, last_uA, x): return [(-last_lA if last_lA is not None else None, -last_uA if last_uA is not None else None)], 0, 0 def bound_forward(self, dim_in, x): return LinearBound(-x.lw, -x.lb, -x.uw, -x.ub) def interval_propagate(self, *v): return -v[0][1], -v[0][0] def infer_batch_dim(self, batch_size, *x): return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4, 'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion =MSELoss()), 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_neg_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = 
torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' 
self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. 
sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): 
super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, 
max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = 
max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundNegNew(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def bound_backward(self, last_lA, last_uA, x): return [(-last_lA if last_lA is not None else None, -last_uA if last_uA is not None else None)], 0, 0 def bound_forward(self, dim_in, x): return LinearBound(-x.lw, -x.lb, -x.uw, -x.ub) def interval_propagate(self, *v): return -v[0][1], -v[0][0] def infer_batch_dim(self, batch_size, *x): return x[0] def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundNeg
false
13,221
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
BoundPow
from _paritybench_helpers import _mock_config import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number from torch.nn import MSELoss def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, 
Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. 
size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 
'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) 
x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = 
batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundPow(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def forward(self, x, y): return torch.pow(x, y) def interval_propagate(self, *v): assert not self.is_input_perturbed(1) exp = v[1][0] assert exp == int(exp) exp = int(exp) pl, pu = torch.pow(v[0][0], exp), torch.pow(v[0][1], exp) if exp % 2 == 1: return pl, pu else: pl, pu = torch.min(pl, pu), torch.max(pl, pu) mask = 1 - ((v[0][0] < 0) * (v[0][1] > 0)).float() return pl * mask, pu def infer_batch_dim(self, batch_size, *x): return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4, 'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion =MSELoss()), 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.pow(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], 
A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' 
self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. 
sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): 
super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, 
max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = 
max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundPowNew(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def interval_propagate(self, *v): assert not self.is_input_perturbed(1) exp = v[1][0] assert exp == int(exp) exp = int(exp) pl, pu = torch.pow(v[0][0], exp), torch.pow(v[0][1], exp) if exp % 2 == 1: return pl, pu else: pl, pu = torch.min(pl, pu), torch.max(pl, pu) mask = 1 - ((v[0][0] < 0) * (v[0][1] > 0)).float() return pl * mask, pu def infer_batch_dim(self, batch_size, *x): return x[0] def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundPow
false
13,222
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
BoundNot
from _paritybench_helpers import _mock_config import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number from torch.nn import MSELoss def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, 
Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. 
size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 
'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) 
x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = 
batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundNot(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def forward(self, x): return x.logical_not() def infer_batch_dim(self, batch_size, *x): return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4, 'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion =MSELoss()), 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_logical_not_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 != 0 tmp2 = tmp1 == 0 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_logical_not_0[grid(256)](arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = 
-original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' 
self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. 
sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): 
super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, 
max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = 
max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundNotNew(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def infer_batch_dim(self, batch_size, *x): return x[0] def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundNot
false
13,223
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
BoundSqrt
from _paritybench_helpers import _mock_config import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number from torch.nn import MSELoss def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, 
Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. 
size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 
'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) 
x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = 
batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundSqrt(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def forward(self, x): return torch.sqrt(x) def infer_batch_dim(self, batch_size, *x): return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4, 'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion =MSELoss()), 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.sqrt(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - 
original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' 
self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. 
sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): 
super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, 
max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = 
max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundSqrtNew(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def infer_batch_dim(self, batch_size, *x): return x[0] def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundSqrt
false
13,224
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
CanineAttention
from _paritybench_helpers import _mock_config import math import torch from typing import List from typing import Tuple from torch import nn from typing import Set import torch.utils.checkpoint def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int', head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking :obj:`already_pruned_heads` into account. Args: heads (:obj:`List[int]`): List of the indices of heads to prune. n_heads (:obj:`int`): The number of heads in the model. head_size (:obj:`int`): The size of each head. already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. Returns: :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. """ mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads for head in heads: head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long() return heads, index def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim: 'int'=0) ->nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`torch.nn.Linear`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. 
""" index = index W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None ) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer class CanineSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. 
attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, from_tensor, to_tensor, attention_mask=None, head_mask=None, output_attentions=False): mixed_query_layer = self.query(from_tensor) key_layer = self.transpose_for_scores(self.key(to_tensor)) value_layer = self.transpose_for_scores(self.value(to_tensor)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): seq_length = from_tensor.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self. max_position_embeddings - 1) positional_embedding = positional_embedding if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) if attention_mask is not None: if attention_mask.ndim == 3: attention_mask = torch.unsqueeze(attention_mask, dim=1) attention_mask = (1.0 - attention_mask.float()) * -10000.0 attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs class CanineSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class CanineAttention(nn.Module): """ Additional arguments related to local attention: - **local** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether to apply local attention. - **always_attend_to_first_position** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Should all blocks be able to attend to the :obj:`to_tensor`'s first position (e.g. a [CLS] position)? - **first_position_attends_to_all** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Should the `from_tensor`'s first position be able to attend to all positions within the `from_tensor`? - **attend_from_chunk_width** (:obj:`int`, `optional`, defaults to 128) -- The width of each block-wise chunk in :obj:`from_tensor`. 
- **attend_from_chunk_stride** (:obj:`int`, `optional`, defaults to 128) -- The number of elements to skip when moving to the next block in :obj:`from_tensor`. - **attend_to_chunk_width** (:obj:`int`, `optional`, defaults to 128) -- The width of each block-wise chunk in `to_tensor`. - **attend_to_chunk_stride** (:obj:`int`, `optional`, defaults to 128) -- The number of elements to skip when moving to the next block in :obj:`to_tensor`. """ def __init__(self, config, local=False, always_attend_to_first_position: 'bool'=False, first_position_attends_to_all: 'bool'=False, attend_from_chunk_width: 'int'=128, attend_from_chunk_stride: 'int' =128, attend_to_chunk_width: 'int'=128, attend_to_chunk_stride: 'int'=128): super().__init__() self.self = CanineSelfAttention(config) self.output = CanineSelfOutput(config) self.pruned_heads = set() self.local = local if attend_from_chunk_width < attend_from_chunk_stride: raise ValueError( '`attend_from_chunk_width` < `attend_from_chunk_stride`would cause sequence positions to get skipped.' ) if attend_to_chunk_width < attend_to_chunk_stride: raise ValueError( '`attend_to_chunk_width` < `attend_to_chunk_stride`would cause sequence positions to get skipped.' ) self.always_attend_to_first_position = always_attend_to_first_position self.first_position_attends_to_all = first_position_attends_to_all self.attend_from_chunk_width = attend_from_chunk_width self.attend_from_chunk_stride = attend_from_chunk_stride self.attend_to_chunk_width = attend_to_chunk_width self.attend_to_chunk_stride = attend_to_chunk_stride def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self. num_attention_heads, self.self.attention_head_size, self. 
pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len( heads) self.self.all_head_size = (self.self.attention_head_size * self. self.num_attention_heads) self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): if not self.local: self_outputs = self.self(hidden_states, hidden_states, attention_mask, head_mask, output_attentions) attention_output = self_outputs[0] else: from_seq_length = to_seq_length = hidden_states.shape[1] from_tensor = to_tensor = hidden_states from_chunks = [] if self.first_position_attends_to_all: from_chunks.append((0, 1)) from_start = 1 else: from_start = 0 for chunk_start in range(from_start, from_seq_length, self. attend_from_chunk_stride): chunk_end = min(from_seq_length, chunk_start + self. attend_from_chunk_width) from_chunks.append((chunk_start, chunk_end)) to_chunks = [] if self.first_position_attends_to_all: to_chunks.append((0, to_seq_length)) for chunk_start in range(0, to_seq_length, self. attend_to_chunk_stride): chunk_end = min(to_seq_length, chunk_start + self. attend_to_chunk_width) to_chunks.append((chunk_start, chunk_end)) if len(from_chunks) != len(to_chunks): raise ValueError( f'Expected to have same number of `from_chunks` ({from_chunks}) and `to_chunks` ({from_chunks}). Check strides.' 
) attention_output_chunks = [] attention_probs_chunks = [] for (from_start, from_end), (to_start, to_end) in zip(from_chunks, to_chunks): from_tensor_chunk = from_tensor[:, from_start:from_end, :] to_tensor_chunk = to_tensor[:, to_start:to_end, :] attention_mask_chunk = attention_mask[:, from_start: from_end, to_start:to_end] if self.always_attend_to_first_position: cls_attention_mask = attention_mask[:, from_start: from_end, 0:1] attention_mask_chunk = torch.cat([cls_attention_mask, attention_mask_chunk], dim=2) cls_position = to_tensor[:, 0:1, :] to_tensor_chunk = torch.cat([cls_position, to_tensor_chunk], dim=1) attention_outputs_chunk = self.self(from_tensor_chunk, to_tensor_chunk, attention_mask_chunk, head_mask, output_attentions) attention_output_chunks.append(attention_outputs_chunk[0]) if output_attentions: attention_probs_chunks.append(attention_outputs_chunk[1]) attention_output = torch.cat(attention_output_chunks, dim=1) attention_output = self.output(attention_output, hidden_states) outputs = attention_output, if not self.local: outputs = outputs + self_outputs[1:] else: outputs = outputs + tuple(attention_probs_chunks) return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, position_embedding_type=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from typing import List from typing import Tuple from torch import nn from typing import Set import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = 
triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] 
xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * 
tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) 
del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_9 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_3, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_3, buf12, buf13, primals_10, primals_11, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_11 return buf14, primals_3, primals_10, buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8 def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int', head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking :obj:`already_pruned_heads` into account. Args: heads (:obj:`List[int]`): List of the indices of heads to prune. n_heads (:obj:`int`): The number of heads in the model. head_size (:obj:`int`): The size of each head. already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. Returns: :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. 
""" mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads for head in heads: head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long() return heads, index def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim: 'int'=0) ->nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`torch.nn.Linear`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. """ index = index W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None ) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer class CanineSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})' ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. 
attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, from_tensor, to_tensor, attention_mask=None, head_mask=None, output_attentions=False): mixed_query_layer = self.query(from_tensor) key_layer = self.transpose_for_scores(self.key(to_tensor)) value_layer = self.transpose_for_scores(self.value(to_tensor)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): seq_length = from_tensor.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self. 
max_position_embeddings - 1) positional_embedding = positional_embedding if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key) attention_scores = attention_scores / math.sqrt(self. attention_head_size) if attention_mask is not None: if attention_mask.ndim == 3: attention_mask = torch.unsqueeze(attention_mask, dim=1) attention_mask = (1.0 - attention_mask.float()) * -10000.0 attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs class CanineSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. 
layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class CanineAttentionNew(nn.Module): """ Additional arguments related to local attention: - **local** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether to apply local attention. - **always_attend_to_first_position** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Should all blocks be able to attend to the :obj:`to_tensor`'s first position (e.g. a [CLS] position)? - **first_position_attends_to_all** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Should the `from_tensor`'s first position be able to attend to all positions within the `from_tensor`? - **attend_from_chunk_width** (:obj:`int`, `optional`, defaults to 128) -- The width of each block-wise chunk in :obj:`from_tensor`. - **attend_from_chunk_stride** (:obj:`int`, `optional`, defaults to 128) -- The number of elements to skip when moving to the next block in :obj:`from_tensor`. - **attend_to_chunk_width** (:obj:`int`, `optional`, defaults to 128) -- The width of each block-wise chunk in `to_tensor`. - **attend_to_chunk_stride** (:obj:`int`, `optional`, defaults to 128) -- The number of elements to skip when moving to the next block in :obj:`to_tensor`. 
""" def __init__(self, config, local=False, always_attend_to_first_position: 'bool'=False, first_position_attends_to_all: 'bool'=False, attend_from_chunk_width: 'int'=128, attend_from_chunk_stride: 'int' =128, attend_to_chunk_width: 'int'=128, attend_to_chunk_stride: 'int'=128): super().__init__() self.self = CanineSelfAttention(config) self.output = CanineSelfOutput(config) self.pruned_heads = set() self.local = local if attend_from_chunk_width < attend_from_chunk_stride: raise ValueError( '`attend_from_chunk_width` < `attend_from_chunk_stride`would cause sequence positions to get skipped.' ) if attend_to_chunk_width < attend_to_chunk_stride: raise ValueError( '`attend_to_chunk_width` < `attend_to_chunk_stride`would cause sequence positions to get skipped.' ) self.always_attend_to_first_position = always_attend_to_first_position self.first_position_attends_to_all = first_position_attends_to_all self.attend_from_chunk_width = attend_from_chunk_width self.attend_from_chunk_stride = attend_from_chunk_stride self.attend_to_chunk_width = attend_to_chunk_width self.attend_to_chunk_stride = attend_to_chunk_stride def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self. num_attention_heads, self.self.attention_head_size, self. pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len( heads) self.self.all_head_size = (self.self.attention_head_size * self. 
self.num_attention_heads) self.pruned_heads = self.pruned_heads.union(heads) def forward(self, input_0): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_8 = self.output.dense.weight primals_9 = self.output.dense.bias primals_10 = self.output.LayerNorm.weight primals_11 = self.output.LayerNorm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Clemens123/transformers
CanineAttention
false
13,225
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
AlbertAttention
from _paritybench_helpers import _mock_config import math import torch from typing import List from typing import Tuple from torch import nn from typing import Set import torch.utils.checkpoint def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int', head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking :obj:`already_pruned_heads` into account. Args: heads (:obj:`List[int]`): List of the indices of heads to prune. n_heads (:obj:`int`): The number of heads in the model. head_size (:obj:`int`): The size of each head. already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. Returns: :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. """ mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads for head in heads: head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long() return heads, index def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim: 'int'=0) ->nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`torch.nn.Linear`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. 
""" index = index W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None ) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer class AlbertAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads}' ) self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.attention_head_size = (config.hidden_size // config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob ) self.output_dropout = nn.Dropout(config.hidden_dropout_prob) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.pruned_heads = set() self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. 
max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self. num_attention_heads, self.attention_head_size, self.pruned_heads) self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.dense = prune_linear_layer(self.dense, index, dim=1) self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = (self.attention_head_size * self. num_attention_heads) self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self. 
max_position_embeddings - 1) positional_embedding = positional_embedding if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key) attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.attention_dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.transpose(2, 1).flatten(2) projected_context_layer = self.dense(context_layer) projected_context_layer_dropout = self.output_dropout( projected_context_layer) layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout) return (layernormed_context_layer, attention_probs ) if output_attentions else (layernormed_context_layer,) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5, layer_norm_eps=1, position_embedding_type=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from typing import List from typing import Tuple from torch import nn from typing import Set import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = 
triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] 
xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = 
tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) 
assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf11, primals_9, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_3, buf11, primals_9, buf12, buf13, primals_10, primals_11, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_11 return buf14, primals_3, primals_9, primals_10, buf7, reinterpret_tensor( buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4 ), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8 def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int', head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking :obj:`already_pruned_heads` into account. Args: heads (:obj:`List[int]`): List of the indices of heads to prune. n_heads (:obj:`int`): The number of heads in the model. head_size (:obj:`int`): The size of each head. already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. 
Returns: :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. """ mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads for head in heads: head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long() return heads, index def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim: 'int'=0) ->nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`torch.nn.Linear`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. """ index = index W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None ) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer class AlbertAttentionNew(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads}' ) self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.attention_head_size = (config.hidden_size // config. 
num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob ) self.output_dropout = nn.Dropout(config.hidden_dropout_prob) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.pruned_heads = set() self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self. num_attention_heads, self.attention_head_size, self.pruned_heads) self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.dense = prune_linear_layer(self.dense, index, dim=1) self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = (self.attention_head_size * self. 
num_attention_heads) self.pruned_heads = self.pruned_heads.union(heads) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_8 = self.dense.weight primals_9 = self.dense.bias primals_10 = self.LayerNorm.weight primals_11 = self.LayerNorm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Clemens123/transformers
AlbertAttention
false
13,226
[ "Apache-2.0" ]
0
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
BoundReciprocal
from _paritybench_helpers import _mock_config import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number from torch.nn import MSELoss def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, 
Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. 
size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 
'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) 
x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = 
batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundActivation(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True self.relaxed = False def _init_linear(self, x): self.mask_pos = torch.gt(x.lower, 0) self.mask_neg = torch.lt(x.upper, 0) self.mask_both = 1 - self.mask_pos - self.mask_neg self.lw = torch.zeros(x.lower.shape, device=self.device) self.lb = self.lw.clone() self.uw = self.lw.clone() self.ub = self.lw.clone() def _add_linear(self, mask, type, k, x0, y0): if mask is None: mask = 1 if type == 'lower': w_out, b_out = self.lw, self.lb else: w_out, b_out = self.uw, self.ub w_out += mask * k b_out += mask * (-x0 * k + y0) def bound_relax(self, x): raise NotImplementedError def bound_backward(self, last_lA, last_uA, x): if not self.relaxed: self._init_linear(x) self.bound_relax(x) def _bound_oneside(last_A, sign=-1): if last_A is None: return None, 0 if self.batch_dim == 0: if sign == -1: _A = last_A.clamp(min=0) * self.lw.unsqueeze(0 ) + last_A.clamp(max=0) * self.uw.unsqueeze(0) _bias = last_A.clamp(min=0) * self.lb.unsqueeze(0 ) + last_A.clamp(max=0) * self.ub.unsqueeze(0) elif sign == 1: _A = last_A.clamp(min=0) * self.uw.unsqueeze(0 ) + last_A.clamp(max=0) * self.lw.unsqueeze(0) _bias = last_A.clamp(min=0) * self.ub.unsqueeze(0 ) + last_A.clamp(max=0) * self.lb.unsqueeze(0) while _bias.ndim > 2: _bias = torch.sum(_bias, dim=-1) elif self.batch_dim == -1: mask = torch.gt(last_A, 0.0) if sign == -1: _A = last_A * (mask * self.lw.unsqueeze(0).unsqueeze(1) + (1 - mask) * self.uw.unsqueeze(0).unsqueeze(1)) _bias = last_A * (mask * self.lb.unsqueeze(0).unsqueeze (1) + (1 - mask) * self.ub.unsqueeze(0).unsqueeze(1)) elif sign == 1: _A = last_A * (mask * self.uw.unsqueeze(0).unsqueeze(1) + (1 - mask) * self.lw.unsqueeze(0).unsqueeze(1)) _bias = last_A * (mask * 
self.ub.unsqueeze(0).unsqueeze (1) + (1 - mask) * self.lb.unsqueeze(0).unsqueeze(1)) while _bias.ndim > 2: _bias = torch.sum(_bias, dim=-1) else: raise NotImplementedError return _A, _bias lA, lbias = _bound_oneside(last_lA, sign=-1) uA, ubias = _bound_oneside(last_uA, sign=+1) return [(lA, uA)], lbias, ubias def bound_forward(self, dim_in, x): if not self.relaxed: self._init_linear(x) self.bound_relax(x) if self.lw.ndim > 0: if x.lw is not None: lw = self.lw.unsqueeze(1).clamp(min=0 ) * x.lw + self.lw.unsqueeze(1).clamp(max=0) * x.uw uw = self.uw.unsqueeze(1).clamp(max=0 ) * x.lw + self.uw.unsqueeze(1).clamp(min=0) * x.uw else: lw = uw = None elif x.lw is not None: lw = self.lw.unsqueeze(0).clamp(min=0) * x.lw + self.lw.unsqueeze(0 ).clamp(max=0) * x.uw uw = self.uw.unsqueeze(0).clamp(min=0) * x.lw + self.uw.unsqueeze(0 ).clamp(max=0) * x.uw else: lw = uw = None lb = self.lw.clamp(min=0) * x.lb + self.lw.clamp(max=0 ) * x.ub + self.lb ub = self.uw.clamp(max=0) * x.lb + self.uw.clamp(min=0 ) * x.ub + self.ub return LinearBound(lw, lb, uw, ub) def infer_batch_dim(self, batch_size, *x): return x[0] class BoundReciprocal(BoundActivation): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def forward(self, x): return torch.reciprocal(x) def bound_relax(self, x): m = (x.lower + x.upper) / 2 kl = -1 / m.pow(2) self._add_linear(mask=None, type='lower', k=kl, x0=m, y0=1.0 / m) ku = -1.0 / (x.lower * x.upper) self._add_linear(mask=None, type='upper', k=ku, x0=x.lower, y0=1.0 / x.lower) def interval_propagate(self, *v): h_L, h_U = v[0] return torch.reciprocal(h_U.float()), torch.reciprocal(h_L.float()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4, 'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion =MSELoss()), 
'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] 
A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' 
self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. 
sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): 
super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, 
max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = 
max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundActivation(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True self.relaxed = False def _init_linear(self, x): self.mask_pos = torch.gt(x.lower, 0) self.mask_neg = torch.lt(x.upper, 0) self.mask_both = 1 - self.mask_pos - self.mask_neg self.lw = torch.zeros(x.lower.shape, device=self.device) self.lb = self.lw.clone() self.uw = self.lw.clone() self.ub = self.lw.clone() def _add_linear(self, mask, type, k, x0, y0): if mask is None: mask = 1 if type == 'lower': w_out, b_out = self.lw, self.lb else: w_out, b_out = self.uw, self.ub w_out += mask * k b_out += mask * (-x0 * k + y0) def bound_relax(self, x): raise NotImplementedError def bound_backward(self, last_lA, last_uA, x): if not self.relaxed: self._init_linear(x) self.bound_relax(x) def _bound_oneside(last_A, sign=-1): if last_A is None: return None, 0 if self.batch_dim == 0: if sign == -1: _A = last_A.clamp(min=0) * self.lw.unsqueeze(0 ) + last_A.clamp(max=0) * self.uw.unsqueeze(0) _bias = last_A.clamp(min=0) * self.lb.unsqueeze(0 ) + last_A.clamp(max=0) * self.ub.unsqueeze(0) elif sign == 1: _A = last_A.clamp(min=0) * self.uw.unsqueeze(0 ) + last_A.clamp(max=0) * self.lw.unsqueeze(0) _bias = last_A.clamp(min=0) * self.ub.unsqueeze(0 ) + last_A.clamp(max=0) * self.lb.unsqueeze(0) while _bias.ndim > 2: _bias = torch.sum(_bias, dim=-1) elif self.batch_dim == -1: mask = torch.gt(last_A, 0.0) if sign == -1: _A = last_A * (mask * self.lw.unsqueeze(0).unsqueeze(1) + (1 - mask) * self.uw.unsqueeze(0).unsqueeze(1)) _bias = last_A * (mask * self.lb.unsqueeze(0).unsqueeze (1) + (1 - mask) * self.ub.unsqueeze(0).unsqueeze(1)) elif sign == 1: _A = last_A * (mask * self.uw.unsqueeze(0).unsqueeze(1) + (1 - mask) * self.lw.unsqueeze(0).unsqueeze(1)) _bias = last_A * (mask * 
self.ub.unsqueeze(0).unsqueeze (1) + (1 - mask) * self.lb.unsqueeze(0).unsqueeze(1)) while _bias.ndim > 2: _bias = torch.sum(_bias, dim=-1) else: raise NotImplementedError return _A, _bias lA, lbias = _bound_oneside(last_lA, sign=-1) uA, ubias = _bound_oneside(last_uA, sign=+1) return [(lA, uA)], lbias, ubias def bound_forward(self, dim_in, x): if not self.relaxed: self._init_linear(x) self.bound_relax(x) if self.lw.ndim > 0: if x.lw is not None: lw = self.lw.unsqueeze(1).clamp(min=0 ) * x.lw + self.lw.unsqueeze(1).clamp(max=0) * x.uw uw = self.uw.unsqueeze(1).clamp(max=0 ) * x.lw + self.uw.unsqueeze(1).clamp(min=0) * x.uw else: lw = uw = None elif x.lw is not None: lw = self.lw.unsqueeze(0).clamp(min=0) * x.lw + self.lw.unsqueeze(0 ).clamp(max=0) * x.uw uw = self.uw.unsqueeze(0).clamp(min=0) * x.lw + self.uw.unsqueeze(0 ).clamp(max=0) * x.uw else: lw = uw = None lb = self.lw.clamp(min=0) * x.lb + self.lw.clamp(max=0 ) * x.ub + self.lb ub = self.uw.clamp(max=0) * x.lb + self.uw.clamp(min=0 ) * x.ub + self.ub return LinearBound(lw, lb, uw, ub) def infer_batch_dim(self, batch_size, *x): return x[0] class BoundReciprocalNew(BoundActivation): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def bound_relax(self, x): m = (x.lower + x.upper) / 2 kl = -1 / m.pow(2) self._add_linear(mask=None, type='lower', k=kl, x0=m, y0=1.0 / m) ku = -1.0 / (x.lower * x.upper) self._add_linear(mask=None, type='upper', k=ku, x0=x.lower, y0=1.0 / x.lower) def interval_propagate(self, *v): h_L, h_U = v[0] return torch.reciprocal(h_U.float()), torch.reciprocal(h_L.float()) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundReciprocal
false
13,227
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
BoundCos
from _paritybench_helpers import _mock_config import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number from torch.nn import MSELoss def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, 
Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. 
size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 
'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) 
x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = 
batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundCos(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def forward(self, x): return torch.cos(x) def infer_batch_dim(self, batch_size, *x): return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4, 'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion =MSELoss()), 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.cos(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cos_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - 
original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' 
self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. 
sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): 
super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, 
max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = 
max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundCosNew(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def infer_batch_dim(self, batch_size, *x): return x[0] def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundCos
false
13,228
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
BoundSub
from _paritybench_helpers import _mock_config import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number from torch.nn import MSELoss def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, 
Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. 
size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 
'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) 
x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = 
batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundMul(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def forward(self, x, y): self.x_shape = x.shape self.y_shape = y.shape return x * y @staticmethod def get_bound_mul(x_l, x_u, y_l, y_u): alpha_l = y_l beta_l = x_l gamma_l = -alpha_l * beta_l alpha_u = y_u beta_u = x_l gamma_u = -alpha_u * beta_u return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u @staticmethod def get_bound_square(x_l, x_u): x_m = F.relu(x_l) - F.relu(-x_u) alpha_l = 2 * x_m gamma_l = -x_m * x_m alpha_u = x_l + x_u gamma_u = -x_l * x_u beta_l = torch.zeros_like(x_l) beta_u = beta_l return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u @staticmethod def _relax(x, y): if x is y: return BoundMul.get_bound_square(x.lower, x.upper) x_l, x_u = x.lower, x.upper y_l, y_u = y.lower, y.upper for k in [1, -1]: x_l = x_l + k * y_l x_u = x_u + k * y_u for k in [1, -1]: y_l = y_l + k * x_l y_u = y_u + k * x_u return BoundMul.get_bound_mul(x_l, x_u, y_l, y_u) def bound_backward(self, last_lA, last_uA, x, y): alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x, y) alpha_l, alpha_u = alpha_l.unsqueeze(0), alpha_u.unsqueeze(0) beta_l, beta_u = beta_l.unsqueeze(0), beta_u.unsqueeze(0) def _bound_oneside(last_A, alpha_pos, beta_pos, gamma_pos, alpha_neg, beta_neg, gamma_neg): if last_A is None: return None, None, 0 last_A_pos, last_A_neg = last_A.clamp(min=0), last_A.clamp(max=0) A_x = last_A_pos * alpha_pos + last_A_neg * alpha_neg A_y = last_A_pos * beta_pos + last_A_neg * beta_neg last_A = last_A.reshape(last_A.shape[0], last_A.shape[1], -1) A_x = self.broadcast_backward(A_x, x) A_y = self.broadcast_backward(A_y, y) bias = self.get_bias(last_A_pos, gamma_pos) + self.get_bias( last_A_neg, gamma_neg) return A_x, A_y, bias lA_x, lA_y, 
lbias = _bound_oneside(last_lA, alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u) uA_x, uA_y, ubias = _bound_oneside(last_uA, alpha_u, beta_u, gamma_u, alpha_l, beta_l, gamma_l) return [(lA_x, uA_x), (lA_y, uA_y)], lbias, ubias @staticmethod def bound_forward(dim_in, x, y): x_lw, x_lb, x_uw, x_ub = x.lw, x.lb, x.uw, x.ub y_lw, y_lb, y_uw, y_ub = y.lw, y.lb, y.uw, y.ub alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x, y) if x_lw is None: x_lw = 0 if y_lw is None: y_lw = 0 if x_uw is None: x_uw = 0 if y_uw is None: y_uw = 0 lw = alpha_l.unsqueeze(1).clamp(min=0) * x_lw + alpha_l.unsqueeze(1 ).clamp(max=0) * x_uw lw = lw + beta_l.unsqueeze(1).clamp(min=0) * y_lw + beta_l.unsqueeze(1 ).clamp(max=0) * y_uw lb = alpha_l.clamp(min=0) * x_lb + alpha_l.clamp(max=0 ) * x_ub + beta_l.clamp(min=0) * y_lb + beta_l.clamp(max=0 ) * y_ub + gamma_l uw = alpha_u.unsqueeze(1).clamp(max=0) * x_lw + alpha_u.unsqueeze(1 ).clamp(min=0) * x_uw uw = uw + beta_u.unsqueeze(1).clamp(max=0) * y_lw + beta_u.unsqueeze(1 ).clamp(min=0) * y_uw ub = alpha_u.clamp(max=0) * x_lb + alpha_u.clamp(min=0 ) * x_ub + beta_u.clamp(max=0) * y_lb + beta_u.clamp(min=0 ) * y_ub + gamma_u return LinearBound(lw, lb, uw, ub) @staticmethod def interval_propagate(*v): x, y = v[0], v[1] if x is y: h_L, h_U = v[0] r0 = h_L * h_L r1 = h_U * h_U l = F.relu(h_L) - F.relu(-h_U) return l * l, torch.max(r0, r1) r0, r1, r2, r3 = x[0] * y[0], x[0] * y[1], x[1] * y[0], x[1] * y[1] lower = torch.min(torch.min(r0, r1), torch.min(r2, r3)) upper = torch.max(torch.max(r0, r1), torch.max(r2, r3)) return lower, upper @staticmethod def infer_batch_dim(batch_size, *x): if x[0] == -1: return x[1] elif x[1] == -1: return x[0] else: assert x[0] == x[1] return x[0] class BoundSub(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def forward(self, x, y): self.x_shape = x.shape 
self.y_shape = y.shape return x - y def bound_backward(self, last_lA, last_uA, x, y): def _bound_oneside(last_A, w, sign=-1): if last_A is None: return None return self.broadcast_backward(sign * last_A, w) uA_x = _bound_oneside(last_uA, x, sign=1) uA_y = _bound_oneside(last_uA, y, sign=-1) lA_x = _bound_oneside(last_lA, x, sign=1) lA_y = _bound_oneside(last_lA, y, sign=-1) return [(lA_x, uA_x), (lA_y, uA_y)], 0, 0 def bound_forward(self, dim_in, x, y): x_lw, x_lb, x_uw, x_ub = Bound.broadcast_forward(dim_in, x, self. default_shape) y_lw, y_lb, y_uw, y_ub = Bound.broadcast_forward(dim_in, y, self. default_shape) lw, lb = x_lw - y_uw, x_lb - y_ub uw, ub = x_uw - y_lw, x_ub - y_lb return LinearBound(lw, lb, uw, ub) def interval_propagate(self, x, y): return x[0] - y[1], x[1] - y[0] def infer_batch_dim(self, batch_size, *x): return BoundMul.infer_batch_dim(batch_size, *x) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4, 'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion =MSELoss()), 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = 
torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' 
self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. 
sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): 
super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, 
max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = 
max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundMul(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def forward(self, x, y): self.x_shape = x.shape self.y_shape = y.shape return x * y @staticmethod def get_bound_mul(x_l, x_u, y_l, y_u): alpha_l = y_l beta_l = x_l gamma_l = -alpha_l * beta_l alpha_u = y_u beta_u = x_l gamma_u = -alpha_u * beta_u return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u @staticmethod def get_bound_square(x_l, x_u): x_m = F.relu(x_l) - F.relu(-x_u) alpha_l = 2 * x_m gamma_l = -x_m * x_m alpha_u = x_l + x_u gamma_u = -x_l * x_u beta_l = torch.zeros_like(x_l) beta_u = beta_l return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u @staticmethod def _relax(x, y): if x is y: return BoundMul.get_bound_square(x.lower, x.upper) x_l, x_u = x.lower, x.upper y_l, y_u = y.lower, y.upper for k in [1, -1]: x_l = x_l + k * y_l x_u = x_u + k * y_u for k in [1, -1]: y_l = y_l + k * x_l y_u = y_u + k * x_u return BoundMul.get_bound_mul(x_l, x_u, y_l, y_u) def bound_backward(self, last_lA, last_uA, x, y): alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x, y) alpha_l, alpha_u = alpha_l.unsqueeze(0), alpha_u.unsqueeze(0) beta_l, beta_u = beta_l.unsqueeze(0), beta_u.unsqueeze(0) def _bound_oneside(last_A, alpha_pos, beta_pos, gamma_pos, alpha_neg, beta_neg, gamma_neg): if last_A is None: return None, None, 0 last_A_pos, last_A_neg = last_A.clamp(min=0), last_A.clamp(max=0) A_x = last_A_pos * alpha_pos + last_A_neg * alpha_neg A_y = last_A_pos * beta_pos + last_A_neg * beta_neg last_A = last_A.reshape(last_A.shape[0], last_A.shape[1], -1) A_x = self.broadcast_backward(A_x, x) A_y = self.broadcast_backward(A_y, y) bias = self.get_bias(last_A_pos, gamma_pos) + self.get_bias( last_A_neg, gamma_neg) return A_x, A_y, bias lA_x, lA_y, 
lbias = _bound_oneside(last_lA, alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u) uA_x, uA_y, ubias = _bound_oneside(last_uA, alpha_u, beta_u, gamma_u, alpha_l, beta_l, gamma_l) return [(lA_x, uA_x), (lA_y, uA_y)], lbias, ubias @staticmethod def bound_forward(dim_in, x, y): x_lw, x_lb, x_uw, x_ub = x.lw, x.lb, x.uw, x.ub y_lw, y_lb, y_uw, y_ub = y.lw, y.lb, y.uw, y.ub alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x, y) if x_lw is None: x_lw = 0 if y_lw is None: y_lw = 0 if x_uw is None: x_uw = 0 if y_uw is None: y_uw = 0 lw = alpha_l.unsqueeze(1).clamp(min=0) * x_lw + alpha_l.unsqueeze(1 ).clamp(max=0) * x_uw lw = lw + beta_l.unsqueeze(1).clamp(min=0) * y_lw + beta_l.unsqueeze(1 ).clamp(max=0) * y_uw lb = alpha_l.clamp(min=0) * x_lb + alpha_l.clamp(max=0 ) * x_ub + beta_l.clamp(min=0) * y_lb + beta_l.clamp(max=0 ) * y_ub + gamma_l uw = alpha_u.unsqueeze(1).clamp(max=0) * x_lw + alpha_u.unsqueeze(1 ).clamp(min=0) * x_uw uw = uw + beta_u.unsqueeze(1).clamp(max=0) * y_lw + beta_u.unsqueeze(1 ).clamp(min=0) * y_uw ub = alpha_u.clamp(max=0) * x_lb + alpha_u.clamp(min=0 ) * x_ub + beta_u.clamp(max=0) * y_lb + beta_u.clamp(min=0 ) * y_ub + gamma_u return LinearBound(lw, lb, uw, ub) @staticmethod def interval_propagate(*v): x, y = v[0], v[1] if x is y: h_L, h_U = v[0] r0 = h_L * h_L r1 = h_U * h_U l = F.relu(h_L) - F.relu(-h_U) return l * l, torch.max(r0, r1) r0, r1, r2, r3 = x[0] * y[0], x[0] * y[1], x[1] * y[0], x[1] * y[1] lower = torch.min(torch.min(r0, r1), torch.min(r2, r3)) upper = torch.max(torch.max(r0, r1), torch.max(r2, r3)) return lower, upper @staticmethod def infer_batch_dim(batch_size, *x): if x[0] == -1: return x[1] elif x[1] == -1: return x[0] else: assert x[0] == x[1] return x[0] class BoundSubNew(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def bound_backward(self, last_lA, last_uA, x, 
y): def _bound_oneside(last_A, w, sign=-1): if last_A is None: return None return self.broadcast_backward(sign * last_A, w) uA_x = _bound_oneside(last_uA, x, sign=1) uA_y = _bound_oneside(last_uA, y, sign=-1) lA_x = _bound_oneside(last_lA, x, sign=1) lA_y = _bound_oneside(last_lA, y, sign=-1) return [(lA_x, uA_x), (lA_y, uA_y)], 0, 0 def bound_forward(self, dim_in, x, y): x_lw, x_lb, x_uw, x_ub = Bound.broadcast_forward(dim_in, x, self. default_shape) y_lw, y_lb, y_uw, y_ub = Bound.broadcast_forward(dim_in, y, self. default_shape) lw, lb = x_lw - y_uw, x_lb - y_ub uw, ub = x_uw - y_lw, x_ub - y_lb return LinearBound(lw, lb, uw, ub) def interval_propagate(self, x, y): return x[0] - y[1], x[1] - y[0] def infer_batch_dim(self, batch_size, *x): return BoundMul.infer_batch_dim(batch_size, *x) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundSub
false
13,229
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
BoundEqual
from _paritybench_helpers import _mock_config import math import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number from torch.nn import MSELoss def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, 
Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. 
size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 
'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) 
x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = 
batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundMul(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def forward(self, x, y): self.x_shape = x.shape self.y_shape = y.shape return x * y @staticmethod def get_bound_mul(x_l, x_u, y_l, y_u): alpha_l = y_l beta_l = x_l gamma_l = -alpha_l * beta_l alpha_u = y_u beta_u = x_l gamma_u = -alpha_u * beta_u return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u @staticmethod def get_bound_square(x_l, x_u): x_m = F.relu(x_l) - F.relu(-x_u) alpha_l = 2 * x_m gamma_l = -x_m * x_m alpha_u = x_l + x_u gamma_u = -x_l * x_u beta_l = torch.zeros_like(x_l) beta_u = beta_l return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u @staticmethod def _relax(x, y): if x is y: return BoundMul.get_bound_square(x.lower, x.upper) x_l, x_u = x.lower, x.upper y_l, y_u = y.lower, y.upper for k in [1, -1]: x_l = x_l + k * y_l x_u = x_u + k * y_u for k in [1, -1]: y_l = y_l + k * x_l y_u = y_u + k * x_u return BoundMul.get_bound_mul(x_l, x_u, y_l, y_u) def bound_backward(self, last_lA, last_uA, x, y): alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x, y) alpha_l, alpha_u = alpha_l.unsqueeze(0), alpha_u.unsqueeze(0) beta_l, beta_u = beta_l.unsqueeze(0), beta_u.unsqueeze(0) def _bound_oneside(last_A, alpha_pos, beta_pos, gamma_pos, alpha_neg, beta_neg, gamma_neg): if last_A is None: return None, None, 0 last_A_pos, last_A_neg = last_A.clamp(min=0), last_A.clamp(max=0) A_x = last_A_pos * alpha_pos + last_A_neg * alpha_neg A_y = last_A_pos * beta_pos + last_A_neg * beta_neg last_A = last_A.reshape(last_A.shape[0], last_A.shape[1], -1) A_x = self.broadcast_backward(A_x, x) A_y = self.broadcast_backward(A_y, y) bias = self.get_bias(last_A_pos, gamma_pos) + self.get_bias( last_A_neg, gamma_neg) return A_x, A_y, bias lA_x, lA_y, 
lbias = _bound_oneside(last_lA, alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u) uA_x, uA_y, ubias = _bound_oneside(last_uA, alpha_u, beta_u, gamma_u, alpha_l, beta_l, gamma_l) return [(lA_x, uA_x), (lA_y, uA_y)], lbias, ubias @staticmethod def bound_forward(dim_in, x, y): x_lw, x_lb, x_uw, x_ub = x.lw, x.lb, x.uw, x.ub y_lw, y_lb, y_uw, y_ub = y.lw, y.lb, y.uw, y.ub alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x, y) if x_lw is None: x_lw = 0 if y_lw is None: y_lw = 0 if x_uw is None: x_uw = 0 if y_uw is None: y_uw = 0 lw = alpha_l.unsqueeze(1).clamp(min=0) * x_lw + alpha_l.unsqueeze(1 ).clamp(max=0) * x_uw lw = lw + beta_l.unsqueeze(1).clamp(min=0) * y_lw + beta_l.unsqueeze(1 ).clamp(max=0) * y_uw lb = alpha_l.clamp(min=0) * x_lb + alpha_l.clamp(max=0 ) * x_ub + beta_l.clamp(min=0) * y_lb + beta_l.clamp(max=0 ) * y_ub + gamma_l uw = alpha_u.unsqueeze(1).clamp(max=0) * x_lw + alpha_u.unsqueeze(1 ).clamp(min=0) * x_uw uw = uw + beta_u.unsqueeze(1).clamp(max=0) * y_lw + beta_u.unsqueeze(1 ).clamp(min=0) * y_uw ub = alpha_u.clamp(max=0) * x_lb + alpha_u.clamp(min=0 ) * x_ub + beta_u.clamp(max=0) * y_lb + beta_u.clamp(min=0 ) * y_ub + gamma_u return LinearBound(lw, lb, uw, ub) @staticmethod def interval_propagate(*v): x, y = v[0], v[1] if x is y: h_L, h_U = v[0] r0 = h_L * h_L r1 = h_U * h_U l = F.relu(h_L) - F.relu(-h_U) return l * l, torch.max(r0, r1) r0, r1, r2, r3 = x[0] * y[0], x[0] * y[1], x[1] * y[0], x[1] * y[1] lower = torch.min(torch.min(r0, r1), torch.min(r2, r3)) upper = torch.max(torch.max(r0, r1), torch.max(r2, r3)) return lower, upper @staticmethod def infer_batch_dim(batch_size, *x): if x[0] == -1: return x[1] elif x[1] == -1: return x[0] else: assert x[0] == x[1] return x[0] class BoundEqual(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def forward(self, x, y): return x == y def 
infer_batch_dim(self, batch_size, *x): return BoundMul.infer_batch_dim(batch_size, *x) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4, 'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion =MSELoss()), 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np import torch.nn as nn import torch.nn.functional as F from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_eq_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, def isnan(x): if isinstance(x, Patches): return False return torch.isnan(x).any() class Perturbation: def __init__(self): pass def set_eps(self, eps): self.eps = eps def concretize(self, x, A, sign=-1, aux=None): raise NotImplementedError def init(self, x, aux=None, forward=False): raise NotImplementedError class PerturbationL0Norm(Perturbation): def __init__(self, eps, x_L=None, x_U=None, ratio=1.0): self.eps = eps self.x_U = x_U self.x_L = x_L self.ratio = ratio def concretize(self, x, A, sign=-1, aux=None): if A is None: return None eps = math.ceil(self.eps) x = x.reshape(x.shape[0], -1, 1) center = A.matmul(x) x = x.reshape(x.shape[0], 1, -1) original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2]) neg_mask = A < 0 pos_mask = A >= 0 if sign == 1: A_diff = 
torch.zeros_like(A) A_diff[pos_mask] = A[pos_mask] - original[pos_mask] A_diff[neg_mask] = -original[neg_mask] else: A_diff = torch.zeros_like(A) A_diff[pos_mask] = original[pos_mask] A_diff[neg_mask] = original[neg_mask] - A[neg_mask] A_diff, _ = torch.sort(A_diff, dim=2, descending=True) bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2 ) * self.ratio return bound.squeeze(2) def init(self, x, aux=None, forward=False): x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps) class PerturbationLpNorm(Perturbation): def __init__(self, eps, norm=np.inf, x_L=None, x_U=None): if not isinstance(eps, Number): if not isinstance(eps, torch.Tensor): self.eps = torch.tensor(eps) else: self.eps = eps if len(self.eps.shape) == 1: self.eps = torch.diag(self.eps) assert self.eps.shape[0] == self.eps.shape[1 ], 'Argument [eps] must form a n by n square matrix.' 
self.norm = 2 else: self.eps = eps self.norm = norm self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 - 1.0 / self.norm) self.x_L = x_L self.x_U = x_U """Given an variable x and its bound matrix A, compute worst case bound according to Lp norm.""" def concretize(self, x, A, sign=-1, aux=None): if A is None: return None def concretize_matrix(A): nonlocal x if not isinstance(A, eyeC): A = A.reshape(A.shape[0], A.shape[1], -1) if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U x_ub = x_U.reshape(x_U.shape[0], -1, 1) x_lb = x_L.reshape(x_L.shape[0], -1, 1) center = (x_ub + x_lb) / 2.0 diff = (x_ub - x_lb) / 2.0 if not isinstance(A, eyeC): bound = A.matmul(center) + sign * A.abs().matmul(diff) else: bound = center + sign * diff else: x = x.reshape(x.shape[0], -1, 1) if not isinstance(A, eyeC): if isinstance(self.eps, Number): deviation = A.norm(self.dual_norm, -1) * self.eps else: deviation = A.matmul(self.eps.transpose(0, 1)).norm( self.dual_norm, -1) bound = A.matmul(x) + sign * deviation.unsqueeze(-1) elif isinstance(self.eps, Number): bound = x + sign * self.eps else: bound = x + sign * self.eps.transpose(0, 1).norm(self. dual_norm, -1) bound = bound.squeeze(-1) return bound def concretize_patches(A): nonlocal x if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U center = (x_U + x_L) / 2.0 diff = (x_U - x_L) / 2.0 if not A.identity == 1: unfold_input = F.unfold(center, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound = prod.view(prod.size(0), prod.size(1), int(math. 
sqrt(prod.size(2))), int(math.sqrt(prod.size(2)))) unfold_input = F.unfold(diff, kernel_size=A.patches. size(-1), padding=A.padding, stride=A.stride ).transpose(-2, -1) unfold_input = unfold_input.view(unfold_input.size(0), unfold_input.size(1), -1, A.patches.size(-3), A. patches.size(-2), A.patches.size(-1)) prod = unfold_input * A.patches.abs() prod = prod.sum((-1, -2, -3)).transpose(-2, -1) bound += sign * prod.view(prod.size(0), prod.size(1), int(math.sqrt(prod.size(2))), int(math.sqrt(prod. size(2)))) else: bound = center + sign * diff return bound else: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U raise NotImplementedError() if isinstance(A, eyeC) or isinstance(A, torch.Tensor): return concretize_matrix(A) elif isinstance(A, Patches): return concretize_patches(A) elif isinstance(A, BoundList): for b in A.bound_list: if isinstance(b, eyeC) or isinstance(b, torch.Tensor): pass else: raise NotImplementedError() def init(self, x, aux=None, forward=False): if self.norm == np.inf: x_L = x - self.eps if self.x_L is None else self.x_L x_U = x + self.eps if self.x_U is None else self.x_U else: x_L = x x_U = x if not forward: return LinearBound(None, None, None, None, x_L, x_U), x, None batch_size = x.shape[0] dim = x.reshape(batch_size, -1).shape[-1] eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1) lw = eye.reshape(batch_size, dim, *x.shape[1:]) lb = torch.zeros_like(x) uw, ub = lw.clone(), lb.clone() return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None def __repr__(self): if self.norm == np.inf: if self.x_L is None and self.x_U is None: return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps) else: return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})' .format(self.eps, self.x_L, self.x_U)) else: return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm, self.eps) class PerturbationSynonym(Perturbation): def __init__(self, budget, eps=1.0, use_simple=False): 
super(PerturbationSynonym, self).__init__() self._load_synonyms() self.budget = budget self.eps = eps self.use_simple = use_simple self.model = None self.train = False def __repr__(self): return ( 'perturbation(Synonym-based word substitution budget={}, eps={})' .format(self.budget, self.eps)) def _load_synonyms(self, path='data/synonyms.json'): with open(path) as file: self.synonym = json.loads(file.read()) logger.info('Synonym list loaded for {} words'.format(len(self. synonym))) def set_train(self, train): self.train = train def concretize(self, x, A, sign, aux): assert self.model is not None x_rep, mask, can_be_replaced = aux batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] dim_out = A.shape[1] max_num_cand = x_rep.shape[2] mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32, device=A.device) num_pos = int(np.max(np.sum(can_be_replaced, axis=-1))) update_A = A.shape[-1] > num_pos * dim_word if update_A: bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape( batch_size, -1, 1)).squeeze(-1) else: bias = 0.0 A = A.reshape(batch_size, dim_out, -1, dim_word) A_new, x_new, x_rep_new, mask_new = [], [], [], [] zeros_A = torch.zeros(dim_out, dim_word, device=A.device) zeros_w = torch.zeros(dim_word, device=A.device) zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device) zeros_mask = torch.zeros(max_num_cand, device=A.device) for t in range(batch_size): cnt = 0 for i in range(0, length): if can_be_replaced[t][i]: if update_A: A_new.append(A[t, :, i, :]) x_new.append(x[t][i]) x_rep_new.append(x_rep[t][i]) mask_new.append(mask[t][i]) cnt += 1 if update_A: A_new += [zeros_A] * (num_pos - cnt) x_new += [zeros_w] * (num_pos - cnt) x_rep_new += [zeros_rep] * (num_pos - cnt) mask_new += [zeros_mask] * (num_pos - cnt) if update_A: A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word ).transpose(1, 2) x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word) x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos, 
max_num_cand, dim_word) mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand) length = num_pos A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2) x = x.reshape(batch_size, length, -1, 1) if sign == 1: cmp, init = torch.max, -1e+30 else: cmp, init = torch.min, 1e+30 init_tensor = torch.ones(batch_size, dim_out) * init dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1) ] dp[0][0] = torch.zeros(batch_size, dim_out) A = A.reshape(batch_size * length, A.shape[2], A.shape[3]) Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x. shape[3])).reshape(batch_size, length, A.shape[1]) Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length, max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size, length, A.shape[1], max_num_cand) Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2) Ax_rep_bound = cmp(Ax_rep, dim=-1).values if self.use_simple and self.train: return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias for i in range(1, length + 1): dp[i][0] = dp[i - 1][0] + Ax[:, i - 1] for j in range(1, self.budget + 1): dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1 ] + Ax_rep_bound[:, i - 1]) dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1, batch_size, dim_out) return cmp(dp, dim=0).values + bias def init(self, x, aux=None, forward=False): tokens, batch = aux self.tokens = tokens assert len(x.shape) == 3 batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2] max_pos = 1 can_be_replaced = np.zeros((batch_size, length), dtype=np.bool) self._build_substitution(batch) for t in range(batch_size): cnt = 0 candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] for i in range(len(tokens[t])): if tokens[t][i] == '[UNK]' or len(candidates[i] ) == 0 or tokens[t][i] != candidates[i][0]: continue for w in candidates[i][1:]: if w in self.model.vocab: can_be_replaced[t][i] = True cnt += 1 break max_pos = max(max_pos, cnt) dim = 
max_pos * dim_word if forward: eye = torch.eye(dim_word) lw = torch.zeros(batch_size, dim, length, dim_word) lb = torch.zeros_like(x) word_embeddings = self.model.word_embeddings.weight vocab = self.model.vocab x_rep = [[[] for i in range(length)] for t in range(batch_size)] max_num_cand = 1 for t in range(batch_size): candidates = batch[t]['candidates'] if tokens[t][0] == '[CLS]': candidates = [[]] + candidates + [[]] cnt = 0 for i in range(length): if can_be_replaced[t][i]: word_embed = word_embeddings[vocab[tokens[t][i]]] other_embed = x[t, i] - word_embed if forward: lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye lb[t, i, :] = torch.zeros_like(word_embed) for w in candidates[i][1:]: if w in self.model.vocab: x_rep[t][i].append(word_embeddings[self.model. vocab[w]] + other_embed) max_num_cand = max(max_num_cand, len(x_rep[t][i])) cnt += 1 elif forward: lb[t, i, :] = x[t, i, :] if forward: uw, ub = lw, lb else: lw = lb = uw = ub = None zeros = torch.zeros(dim_word, device=x.device) x_rep_, mask = [], [] for t in range(batch_size): for i in range(length): x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep [t][i])) mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len( x_rep[t][i])) x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand, dim_word) mask = torch.tensor(mask, dtype=torch.float32, device=x.device ).reshape(batch_size, length, max_num_cand) x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps) inf = 1e+20 lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask). unsqueeze(-1) * inf, dim=2).values upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask). 
unsqueeze(-1) * -inf, dim=2).values lower = torch.min(lower, x) upper = torch.max(upper, x) return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask, can_be_replaced) def _build_substitution(self, batch): for t, example in enumerate(batch): if 'candidates' not in example or example['candidates'] is None: candidates = [] tokens = example['sentence'].strip().lower().split(' ') for i in range(len(tokens)): _cand = [] if tokens[i] in self.synonym: for w in self.synonym[tokens[i]]: if w in self.model.vocab: _cand.append(w) if len(_cand) > 0: _cand = [tokens[i]] + _cand candidates.append(_cand) example['candidates'] = candidates class Interval(tuple): def __new__(self, lb=None, ub=None, ptb=None): if ub is None: assert isinstance(lb, tuple) lb, ub = lb return tuple.__new__(Interval, (lb, ub)) def __init__(self, lb, ub, ptb=None): if ptb is None: self.ptb = None assert lb is ub elif not isinstance(ptb, Perturbation): raise ValueError( 'ptb must be a Perturbation object or None. Got type {}'. format(type(ptb))) else: self.ptb = ptb def __str__(self): return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb) def __repr__(self): return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1], self.ptb) """Checking if the other interval is tuple, keep the perturbation.""" @staticmethod def make_interval(lb, ub, other): if isinstance(other, Interval): return Interval(lb, ub, other.ptb) else: return lb, ub """Given a tuple or Interval object, returns the norm and eps.""" @staticmethod def get_perturbation(interval): if isinstance(interval, Interval): if isinstance(interval.ptb, PerturbationLpNorm): return interval.ptb.norm, interval.ptb.eps elif isinstance(interval.ptb, PerturbationSynonym): return np.inf, 1.0 elif isinstance(interval.ptb, PerturbationL0Norm): return 0, interval.ptb.eps, interval.ptb.ratio elif interval.ptb is None: raise RuntimeError( 'get_perturbation() encountered an interval that is not perturbed.' 
) else: raise RuntimeError( 'get_perturbation() does not know how to handle {}'. format(type(interval.ptb))) else: return np.inf, np.nan """Checking if a Interval or tuple object has perturbation enabled.""" @staticmethod def is_perturbed(interval): if isinstance(interval, Interval) and interval.ptb is None: return False else: return True class Bound(nn.Module): def __init__(self, input_name, name, ori_name, attr={}, inputs=[], output_index=0, options={}, device=None): super().__init__() self.output_name = [] (self.input_name, self.name, self.ori_name, self.attr, self.inputs, self.output_index, self.options, self.device) = (input_name, name, ori_name, attr, inputs, output_index, options, device) self.fv = None self.from_input = False self.bounded = False self.IBP_rets = None self.perturbed = False if options is not None and 'loss_fusion' in options: self.loss_fusion = options['loss_fusion'] else: self.loss_fusion = False """Check if the i-th input is with perturbation or not.""" def is_input_perturbed(self, i=0): return self.inputs[i].perturbed def forward(self, *x): raise NotImplementedError def interval_propagate(self, *v): assert len(v) == 1 h_L, h_U = v[0] return Interval.make_interval(self.forward(h_L), self.forward(h_U), v[0]) def bound_forward(self, dim_in, last): raise NotImplementedError def bound_backward(self, last_lA, last_uA): raise NotImplementedError def infer_batch_dim(self, batch_size, *x): None raise NotImplementedError def broadcast_backward(self, A, x): shape = x.default_shape batch_dim = max(self.batch_dim, 0) if isinstance(A, torch.Tensor): if x.batch_dim == -1: shape = torch.Size([A.shape[batch_dim + 1]] + list(shape)) dims = [] cnt_sum = A.ndim - len(shape) - 1 for i in range(1, A.ndim): if i != self.batch_dim + 1 and cnt_sum > 0: dims.append(i) cnt_sum -= 1 if dims: A = torch.sum(A, dim=dims) else: dims = list(range(1, 1 + A.ndim - 1 - len(shape))) if dims: A = torch.sum(A, dim=dims) dims = [] for i in range(len(shape)): if shape[i] == 1 
and A.shape[i + 1] != 1: dims.append(i + 1) if dims: A = torch.sum(A, dim=dims, keepdim=True) assert A.shape[1:] == shape elif type(A) == Patches: pass return A @staticmethod def broadcast_forward(dim_in, x, shape_res): lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub shape_x, shape_res = list(x.lb.shape), list(shape_res) if lw is None: lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device) has_batch_size = False else: has_batch_size = True while len(shape_x) < len(shape_res): if not has_batch_size: lw, uw = lw.unsqueeze(0), uw.unsqueeze(0) lb, ub = lb.unsqueeze(0), ub.unsqueeze(0) shape_x = [1] + shape_x has_batch_size = True else: lw, uw = lw.unsqueeze(2), uw.unsqueeze(2) lb, ub = lb.unsqueeze(1), ub.unsqueeze(1) shape_x = [shape_x[0], 1] + shape_x[1:] repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))] lb, ub = lb.repeat(*repeat), ub.repeat(*repeat) repeat = repeat[:1] + [1] + repeat[1:] lw, uw = lw.repeat(*repeat), uw.repeat(*repeat) return lw, lb, uw, ub def get_bias(self, A, bias): if A is None: return 0 assert not isnan(A) assert not isnan(bias) if isinstance(A, torch.Tensor): if torch.norm(A, p=1) < epsilon: return 0 output_dim = A.shape[0] if self.batch_dim != -1: batch_size = A.shape[self.batch_dim + 1] A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1 ]).astype(np.int32), batch_size, np.prod(A.shape[self. 
batch_dim + 2:]).astype(np.int32)] A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size, output_dim, -1) bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape( batch_size, -1, 1) bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1) else: batch_size = A.shape[1] A = A.view(output_dim, batch_size, -1) bias_new = A.matmul(bias.view(-1)) if isnan(bias_new): return 0 else: return bias_new elif type(A) == Patches: if torch.norm(A.patches, p=1) < epsilon: return 0 if self.batch_dim != -1: batch_size = bias.shape[0] bias = F.unfold(bias, kernel_size=A.patches.size(-1), stride=A.stride, padding=A.padding).transpose(-2, -1 ).unsqueeze(-2) bias.size(1) patches = A.patches.view(A.patches.size(0), A.patches.size( 1), A.patches.size(-4), A.patches.size(-1) * A.patches. size(-2) * A.patches.size(-3)) prod = bias * patches bias_new = prod.sum(-1).transpose(-2, -1) bias_new = bias_new.view(batch_size, bias_new.size(-2), int (math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new. size(-1)))) else: patches = A.patches patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias patches_reshape = patches_reshape.transpose(-1, -2) return patches_reshape.view(patches_reshape.size(0), patches_reshape.size(1), int(math.sqrt(patches_reshape. 
size(2))), -1).transpose(0, 1) return bias_new else: return NotImplementedError() class BoundMul(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) self.nonlinear = True def forward(self, x, y): self.x_shape = x.shape self.y_shape = y.shape return x * y @staticmethod def get_bound_mul(x_l, x_u, y_l, y_u): alpha_l = y_l beta_l = x_l gamma_l = -alpha_l * beta_l alpha_u = y_u beta_u = x_l gamma_u = -alpha_u * beta_u return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u @staticmethod def get_bound_square(x_l, x_u): x_m = F.relu(x_l) - F.relu(-x_u) alpha_l = 2 * x_m gamma_l = -x_m * x_m alpha_u = x_l + x_u gamma_u = -x_l * x_u beta_l = torch.zeros_like(x_l) beta_u = beta_l return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u @staticmethod def _relax(x, y): if x is y: return BoundMul.get_bound_square(x.lower, x.upper) x_l, x_u = x.lower, x.upper y_l, y_u = y.lower, y.upper for k in [1, -1]: x_l = x_l + k * y_l x_u = x_u + k * y_u for k in [1, -1]: y_l = y_l + k * x_l y_u = y_u + k * x_u return BoundMul.get_bound_mul(x_l, x_u, y_l, y_u) def bound_backward(self, last_lA, last_uA, x, y): alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x, y) alpha_l, alpha_u = alpha_l.unsqueeze(0), alpha_u.unsqueeze(0) beta_l, beta_u = beta_l.unsqueeze(0), beta_u.unsqueeze(0) def _bound_oneside(last_A, alpha_pos, beta_pos, gamma_pos, alpha_neg, beta_neg, gamma_neg): if last_A is None: return None, None, 0 last_A_pos, last_A_neg = last_A.clamp(min=0), last_A.clamp(max=0) A_x = last_A_pos * alpha_pos + last_A_neg * alpha_neg A_y = last_A_pos * beta_pos + last_A_neg * beta_neg last_A = last_A.reshape(last_A.shape[0], last_A.shape[1], -1) A_x = self.broadcast_backward(A_x, x) A_y = self.broadcast_backward(A_y, y) bias = self.get_bias(last_A_pos, gamma_pos) + self.get_bias( last_A_neg, gamma_neg) return A_x, A_y, bias lA_x, lA_y, 
lbias = _bound_oneside(last_lA, alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u) uA_x, uA_y, ubias = _bound_oneside(last_uA, alpha_u, beta_u, gamma_u, alpha_l, beta_l, gamma_l) return [(lA_x, uA_x), (lA_y, uA_y)], lbias, ubias @staticmethod def bound_forward(dim_in, x, y): x_lw, x_lb, x_uw, x_ub = x.lw, x.lb, x.uw, x.ub y_lw, y_lb, y_uw, y_ub = y.lw, y.lb, y.uw, y.ub alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x, y) if x_lw is None: x_lw = 0 if y_lw is None: y_lw = 0 if x_uw is None: x_uw = 0 if y_uw is None: y_uw = 0 lw = alpha_l.unsqueeze(1).clamp(min=0) * x_lw + alpha_l.unsqueeze(1 ).clamp(max=0) * x_uw lw = lw + beta_l.unsqueeze(1).clamp(min=0) * y_lw + beta_l.unsqueeze(1 ).clamp(max=0) * y_uw lb = alpha_l.clamp(min=0) * x_lb + alpha_l.clamp(max=0 ) * x_ub + beta_l.clamp(min=0) * y_lb + beta_l.clamp(max=0 ) * y_ub + gamma_l uw = alpha_u.unsqueeze(1).clamp(max=0) * x_lw + alpha_u.unsqueeze(1 ).clamp(min=0) * x_uw uw = uw + beta_u.unsqueeze(1).clamp(max=0) * y_lw + beta_u.unsqueeze(1 ).clamp(min=0) * y_uw ub = alpha_u.clamp(max=0) * x_lb + alpha_u.clamp(min=0 ) * x_ub + beta_u.clamp(max=0) * y_lb + beta_u.clamp(min=0 ) * y_ub + gamma_u return LinearBound(lw, lb, uw, ub) @staticmethod def interval_propagate(*v): x, y = v[0], v[1] if x is y: h_L, h_U = v[0] r0 = h_L * h_L r1 = h_U * h_U l = F.relu(h_L) - F.relu(-h_U) return l * l, torch.max(r0, r1) r0, r1, r2, r3 = x[0] * y[0], x[0] * y[1], x[1] * y[0], x[1] * y[1] lower = torch.min(torch.min(r0, r1), torch.min(r2, r3)) upper = torch.max(torch.max(r0, r1), torch.max(r2, r3)) return lower, upper @staticmethod def infer_batch_dim(batch_size, *x): if x[0] == -1: return x[1] elif x[1] == -1: return x[0] else: assert x[0] == x[1] return x[0] class BoundEqualNew(Bound): def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device): super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device) def infer_batch_dim(self, batch_size, *x): 
return BoundMul.infer_batch_dim(batch_size, *x) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundEqual
false
13,230
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
MMFB
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, groups=3): super(ConvBlock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=1, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class ConvBlockD(nn.Module): def __init__(self, in_channels, out_channels, groups=3, ker_size=2): super(ConvBlockD, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=ker_size, dilation=ker_size, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class MIRB1(nn.Module): def __init__(self, args): super(MIRB1, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlock(args.n_feats, self.c_out) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlock(args.n_feats, self.c_out) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlock(args.n_feats, self.c_out) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): res = x c1_1 = 
self.lrelu(self.conv3_1(res)) c2_1 = self.lrelu(self.convd_1(res)) c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1))) c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1))) c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1))) c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1))) out = self.conv_last(torch.cat([c1_4, c2_4], 1)) out = out + x return out class MIRB2(nn.Module): def __init__(self, args): super(MIRB2, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): res = x c1_1 = self.lrelu(self.conv3_1(res)) c2_1 = self.lrelu(self.convd_1(res)) c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1))) c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1))) c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1))) c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1))) out = self.conv_last(torch.cat([c1_4, c2_4], 1)) out = out + x return out class MIRB3(nn.Module): def __init__(self, args): super(MIRB3, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) 
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): res = x c1_1 = self.lrelu(self.conv3_1(res)) c2_1 = self.lrelu(self.convd_1(res)) c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1))) c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1))) c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1))) c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1))) out = self.conv_last(torch.cat([c1_4, c2_4], 1)) out = out + x return out class MMFB(nn.Module): def __init__(self, args): super(MMFB, self).__init__() def wn(x): return torch.nn.utils.weight_norm(x) self.bs1 = MIRB1(args) self.bs11 = MIRB1(args) self.bs2 = MIRB2(args) self.bs22 = MIRB2(args) self.bs3 = MIRB3(args) self.bs33 = MIRB3(args) def forward(self, x): res = x res = self.bs1(res) res = self.bs11(res) res = self.bs2(res) res = self.bs22(res) res = self.bs3(res) res = self.bs33(res) out = res + x return out def get_inputs(): return [torch.rand([4, 18, 64, 64])] def get_init_inputs(): return [[], {'args': _mock_config(n_feats=18)}]
# --------------------------------------------------------------------------
# TorchInductor-generated Triton code for the MMFB graph above.
# Do not hand-tune: variable names (tmpN, bufN, primals_N) and statement
# order are produced by the compiler and are load-bearing.
# These imports pull private torch internals; they are only valid for the
# torch version that generated this file.
# --------------------------------------------------------------------------
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


# Per-output-channel L2 norm of an (18, 6, 1, 1) weight_norm `v` tensor:
# sqrt of the sum of squares of the 6 contiguous elements of each row.
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 18
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Unrolled load of the 6 elements of row x0.
    tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = libdevice.sqrt(tmp16)
    tl.store(out_ptr0 + x0, tmp17, xmask)


# Weight-norm reparameterization: w = v * (g / ||v||), elementwise over the
# 108 = 18*6 weight entries; in_ptr1 is g, in_ptr2 the norms from kernel 0.
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 108
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 6
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 / tmp2
    tmp4 = tmp0 * tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)


# In-place per-channel bias add after a convolution (18 channels, 64x64 map).
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 18
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


# Weight norm for an (18, 1, 3, 3) weight: row norm via a 9-wide reduction,
# storing both the norms (in_out_ptr0) and the normalized weight (out_ptr0).
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0,
    in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 18
    rnumel = 9
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0)
    tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(rmask & xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = libdevice.sqrt(tmp5)
    tmp8 = tmp7 / tmp6
    tmp9 = tmp0 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask)


# Same as kernel 3 for a (9, 18, 1, 1) weight: 9 rows, 18-wide reduction.
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0,
    in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 9
    rnumel = 18
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
    tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(rmask & xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = libdevice.sqrt(tmp5)
    tmp8 = tmp7 / tmp6
    tmp9 = tmp0 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)


# Fused torch.cat([a, b], dim=1) of two 9-channel conv outputs into an
# 18-channel tensor, with per-channel bias add and LeakyReLU(0.2) applied
# to each half before the concat (channels 0-8 from in_ptr0/in_ptr1,
# channels 9-17 from in_ptr2/in_ptr3).
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4096 % 18
    x0 = xindex % 4096
    x2 = xindex // 73728
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 9, tl.int64)
    tmp4 = tmp0 < tmp3
    # First half: bias + leaky relu, masked to channels < 9.
    tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 36864 * x2), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = 0.0
    tmp9 = tmp7 > tmp8
    tmp10 = 0.2
    tmp11 = tmp7 * tmp10
    tmp12 = tl.where(tmp9, tmp7, tmp11)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp4, tmp12, tmp13)
    tmp15 = tmp0 >= tmp3
    tl.full([1], 18, tl.int64)
    # Second half: same fusion, channel index shifted by -9.
    tmp18 = tl.load(in_ptr2 + (x0 + 4096 * (-9 + x1) + 36864 * x2), tmp15,
        other=0.0)
    tmp19 = tl.load(in_ptr3 + (-9 + x1), tmp15, eviction_policy=
        'evict_last', other=0.0)
    tmp20 = tmp18 + tmp19
    tmp21 = tmp20 > tmp8
    tmp22 = tmp20 * tmp10
    tmp23 = tl.where(tmp21, tmp20, tmp22)
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp15, tmp23, tmp24)
    tmp26 = tl.where(tmp4, tmp14, tmp25)
    tl.store(out_ptr0 + x3, tmp26, None)


# Same as kernels 3/4 for the (18, 18, 1, 1) conv_last weight.
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0,
    in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 18
    rnumel = 18
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
    tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(rmask & xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = libdevice.sqrt(tmp5)
    tmp8 = tmp7 / tmp6
    tmp9 = tmp0 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)


# In-place conv bias add plus one residual tensor (out = out + bias + x).
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 18
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x3, tmp4, None)


# In-place conv bias add plus two residual tensors (out = out + bias + a + b).
@triton.jit
def triton_poi_fused_add_convolution_8(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 18
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, None)
    tmp5 = tl.load(in_ptr2 + x3, None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tl.store(in_out_ptr0 + x3, tmp6, None)


# Recomputes bias + LeakyReLU(0.2) on a 9-channel conv output and stores the
# boolean (output > 0) mask; per the kernel name this mask feeds the
# leaky-relu backward pass.
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 9
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp8 = tmp7 > tmp3
    tl.store(out_ptr0 + x3, tmp8, None)


# Inductor entry point.  Unpacks the flat parameter list (343 tensors:
# input + every weight_norm g/v pair and bias of MMFB) -- the unpack,
# stride asserts, and kernel launches continue in the next chunk.
def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20,
primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, 
primals_180, primals_181, primals_182, primals_183, primals_184, primals_185, primals_186, primals_187, primals_188, primals_189, primals_190, primals_191, primals_192, primals_193, primals_194, primals_195, primals_196, primals_197, primals_198, primals_199, primals_200, primals_201, primals_202, primals_203, primals_204, primals_205, primals_206, primals_207, primals_208, primals_209, primals_210, primals_211, primals_212, primals_213, primals_214, primals_215, primals_216, primals_217, primals_218, primals_219, primals_220, primals_221, primals_222, primals_223, primals_224, primals_225, primals_226, primals_227, primals_228, primals_229, primals_230, primals_231, primals_232, primals_233, primals_234, primals_235, primals_236, primals_237, primals_238, primals_239, primals_240, primals_241, primals_242, primals_243, primals_244, primals_245, primals_246, primals_247, primals_248, primals_249, primals_250, primals_251, primals_252, primals_253, primals_254, primals_255, primals_256, primals_257, primals_258, primals_259, primals_260, primals_261, primals_262, primals_263, primals_264, primals_265, primals_266, primals_267, primals_268, primals_269, primals_270, primals_271, primals_272, primals_273, primals_274, primals_275, primals_276, primals_277, primals_278, primals_279, primals_280, primals_281, primals_282, primals_283, primals_284, primals_285, primals_286, primals_287, primals_288, primals_289, primals_290, primals_291, primals_292, primals_293, primals_294, primals_295, primals_296, primals_297, primals_298, primals_299, primals_300, primals_301, primals_302, primals_303, primals_304, primals_305, primals_306, primals_307, primals_308, primals_309, primals_310, primals_311, primals_312, primals_313, primals_314, primals_315, primals_316, primals_317, primals_318, primals_319, primals_320, primals_321, primals_322, primals_323, primals_324, primals_325, primals_326, primals_327, primals_328, primals_329, primals_330, primals_331, primals_332, 
primals_333, primals_334, primals_335, primals_336, primals_337, primals_338, primals_339, primals_340, primals_341, primals_342, primals_343) = args args.clear() assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1)) assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_4, (18,), (1,)) assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_7, (18,), (1,)) assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_10, (9,), (1,)) assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_13, (18,), (1,)) assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_16, (18,), (1,)) assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_19, (9,), (1,)) assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_22, (18,), (1,)) assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_25, (18,), (1,)) assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_28, (9,), (1,)) assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_31, (18,), (1,)) assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1)) 
assert_size_stride(primals_34, (18,), (1,)) assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_37, (9,), (1,)) assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_40, (18,), (1,)) assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_43, (18,), (1,)) assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_46, (9,), (1,)) assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_49, (18,), (1,)) assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_52, (18,), (1,)) assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_55, (9,), (1,)) assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_58, (18,), (1,)) assert_size_stride(primals_59, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_60, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_61, (18,), (1,)) assert_size_stride(primals_62, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_63, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_64, (18,), (1,)) assert_size_stride(primals_65, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_66, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_67, (9,), (1,)) assert_size_stride(primals_68, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_69, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_70, 
(18,), (1,)) assert_size_stride(primals_71, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_72, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_73, (18,), (1,)) assert_size_stride(primals_74, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_75, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_76, (9,), (1,)) assert_size_stride(primals_77, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_78, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_79, (18,), (1,)) assert_size_stride(primals_80, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_81, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_82, (18,), (1,)) assert_size_stride(primals_83, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_84, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_85, (9,), (1,)) assert_size_stride(primals_86, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_87, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_88, (18,), (1,)) assert_size_stride(primals_89, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_90, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_91, (18,), (1,)) assert_size_stride(primals_92, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_93, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_94, (9,), (1,)) assert_size_stride(primals_95, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_96, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_97, (18,), (1,)) assert_size_stride(primals_98, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_99, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_100, (18,), (1,)) assert_size_stride(primals_101, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_102, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_103, (9,), (1,)) assert_size_stride(primals_104, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_105, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_106, (18,), (1,)) 
assert_size_stride(primals_107, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_108, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_109, (18,), (1,)) assert_size_stride(primals_110, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_111, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_112, (9,), (1,)) assert_size_stride(primals_113, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_114, (18, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_115, (18,), (1,)) assert_size_stride(primals_116, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_117, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_118, (18,), (1,)) assert_size_stride(primals_119, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_120, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_121, (18,), (1,)) assert_size_stride(primals_122, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_123, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_124, (9,), (1,)) assert_size_stride(primals_125, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_126, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_127, (18,), (1,)) assert_size_stride(primals_128, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_129, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_130, (18,), (1,)) assert_size_stride(primals_131, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_132, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_133, (9,), (1,)) assert_size_stride(primals_134, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_135, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_136, (18,), (1,)) assert_size_stride(primals_137, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_138, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_139, (18,), (1,)) assert_size_stride(primals_140, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_141, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_142, (9,), 
(1,)) assert_size_stride(primals_143, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_144, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_145, (18,), (1,)) assert_size_stride(primals_146, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_147, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_148, (18,), (1,)) assert_size_stride(primals_149, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_150, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_151, (9,), (1,)) assert_size_stride(primals_152, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_153, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_154, (18,), (1,)) assert_size_stride(primals_155, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_156, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_157, (18,), (1,)) assert_size_stride(primals_158, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_159, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_160, (9,), (1,)) assert_size_stride(primals_161, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_162, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_163, (18,), (1,)) assert_size_stride(primals_164, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_165, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_166, (18,), (1,)) assert_size_stride(primals_167, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_168, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_169, (9,), (1,)) assert_size_stride(primals_170, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_171, (18, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_172, (18,), (1,)) assert_size_stride(primals_173, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_174, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_175, (18,), (1,)) assert_size_stride(primals_176, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_177, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_178, 
(18,), (1,)) assert_size_stride(primals_179, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_180, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_181, (9,), (1,)) assert_size_stride(primals_182, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_183, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_184, (18,), (1,)) assert_size_stride(primals_185, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_186, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_187, (18,), (1,)) assert_size_stride(primals_188, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_189, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_190, (9,), (1,)) assert_size_stride(primals_191, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_192, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_193, (18,), (1,)) assert_size_stride(primals_194, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_195, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_196, (18,), (1,)) assert_size_stride(primals_197, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_198, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_199, (9,), (1,)) assert_size_stride(primals_200, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_201, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_202, (18,), (1,)) assert_size_stride(primals_203, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_204, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_205, (18,), (1,)) assert_size_stride(primals_206, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_207, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_208, (9,), (1,)) assert_size_stride(primals_209, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_210, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_211, (18,), (1,)) assert_size_stride(primals_212, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_213, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_214, 
(18,), (1,)) assert_size_stride(primals_215, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_216, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_217, (9,), (1,)) assert_size_stride(primals_218, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_219, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_220, (18,), (1,)) assert_size_stride(primals_221, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_222, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_223, (18,), (1,)) assert_size_stride(primals_224, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_225, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_226, (9,), (1,)) assert_size_stride(primals_227, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_228, (18, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_229, (18,), (1,)) assert_size_stride(primals_230, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_231, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_232, (18,), (1,)) assert_size_stride(primals_233, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_234, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_235, (18,), (1,)) assert_size_stride(primals_236, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_237, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_238, (9,), (1,)) assert_size_stride(primals_239, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_240, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_241, (18,), (1,)) assert_size_stride(primals_242, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_243, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_244, (18,), (1,)) assert_size_stride(primals_245, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_246, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_247, (9,), (1,)) assert_size_stride(primals_248, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_249, (18, 6, 1, 1), (6, 1, 1, 1)) 
assert_size_stride(primals_250, (18,), (1,)) assert_size_stride(primals_251, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_252, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_253, (18,), (1,)) assert_size_stride(primals_254, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_255, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_256, (9,), (1,)) assert_size_stride(primals_257, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_258, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_259, (18,), (1,)) assert_size_stride(primals_260, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_261, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_262, (18,), (1,)) assert_size_stride(primals_263, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_264, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_265, (9,), (1,)) assert_size_stride(primals_266, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_267, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_268, (18,), (1,)) assert_size_stride(primals_269, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_270, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_271, (18,), (1,)) assert_size_stride(primals_272, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_273, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_274, (9,), (1,)) assert_size_stride(primals_275, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_276, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_277, (18,), (1,)) assert_size_stride(primals_278, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_279, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_280, (18,), (1,)) assert_size_stride(primals_281, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_282, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_283, (9,), (1,)) assert_size_stride(primals_284, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_285, (18, 18, 1, 1), (18, 1, 1, 
1)) assert_size_stride(primals_286, (18,), (1,)) assert_size_stride(primals_287, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_288, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_289, (18,), (1,)) assert_size_stride(primals_290, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_291, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_292, (18,), (1,)) assert_size_stride(primals_293, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_294, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_295, (9,), (1,)) assert_size_stride(primals_296, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_297, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_298, (18,), (1,)) assert_size_stride(primals_299, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_300, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_301, (18,), (1,)) assert_size_stride(primals_302, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_303, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_304, (9,), (1,)) assert_size_stride(primals_305, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_306, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_307, (18,), (1,)) assert_size_stride(primals_308, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_309, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_310, (18,), (1,)) assert_size_stride(primals_311, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_312, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_313, (9,), (1,)) assert_size_stride(primals_314, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_315, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_316, (18,), (1,)) assert_size_stride(primals_317, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_318, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_319, (18,), (1,)) assert_size_stride(primals_320, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_321, (9, 18, 1, 1), (18, 1, 
1, 1)) assert_size_stride(primals_322, (9,), (1,)) assert_size_stride(primals_323, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_324, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_325, (18,), (1,)) assert_size_stride(primals_326, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_327, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_328, (18,), (1,)) assert_size_stride(primals_329, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_330, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_331, (9,), (1,)) assert_size_stride(primals_332, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_333, (18, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_334, (18,), (1,)) assert_size_stride(primals_335, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_336, (18, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_337, (18,), (1,)) assert_size_stride(primals_338, (9, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_339, (9, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_340, (9,), (1,)) assert_size_stride(primals_341, (18, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_342, (18, 18, 1, 1), (18, 1, 1, 1)) assert_size_stride(primals_343, (18,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_3, buf0, 18, XBLOCK=32, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_3, primals_2, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_2[grid(294912)](buf3, primals_4, 294912, 
XBLOCK=512, num_warps=8, num_stages=1) del primals_4 buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32 ) buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf4 buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6, primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf9 buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf10, primals_9, primals_8, buf11, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_12, buf13, 18, XBLOCK=32, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_12, primals_11, buf13, buf14, 108, XBLOCK=128, num_warps=4, num_stages=1) buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf16 = buf15 del buf15 
triton_poi_fused_convolution_2[grid(294912)](buf16, primals_13, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf17 buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf18, primals_15, primals_14, buf19, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_2[grid(294912)](buf21, primals_16, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_16 buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf22 buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf23, primals_18, primals_17, buf24, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf12, primals_10, buf25, primals_19, buf26, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_21, buf27, 18, XBLOCK=32, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_21, primals_20, buf27, buf28, 108, 
XBLOCK=128, num_warps=4, num_stages=1) buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf30 = buf29 del buf29 triton_poi_fused_convolution_2[grid(294912)](buf30, primals_22, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_22 buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf31 buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf32, primals_24, primals_23, buf33, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf35 = buf34 del buf34 triton_poi_fused_convolution_2[grid(294912)](buf35, primals_25, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_25 buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf36 buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf37, primals_27, primals_26, buf38, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_30, buf40, 18, XBLOCK=32, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_30, primals_29, buf40, buf41, 108, XBLOCK=128, num_warps=4, num_stages=1) buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_2[grid(294912)](buf43, primals_31, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_31 buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf44 buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf45, primals_33, primals_32, buf46, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf48 = buf47 del buf47 triton_poi_fused_convolution_2[grid(294912)](buf48, primals_34, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_34 buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf49 buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf50, primals_36, primals_35, buf51, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf39, primals_28, buf52, primals_37, buf53, 294912, XBLOCK=512, 
num_warps=8, num_stages=1) buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_39, buf54, 18, XBLOCK=32, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_39, primals_38, buf54, buf55, 108, XBLOCK=128, num_warps=4, num_stages=1) buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf57 = buf56 del buf56 triton_poi_fused_convolution_2[grid(294912)](buf57, primals_40, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_40 buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf58 buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf59, primals_42, primals_41, buf60, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf62 = buf61 del buf61 triton_poi_fused_convolution_2[grid(294912)](buf62, primals_43, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_43 buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf63 buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf64, primals_45, primals_44, buf65, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_48, buf67, 18, XBLOCK=32, num_warps=1, num_stages=1) buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_48, primals_47, buf67, buf68, 108, XBLOCK=128, num_warps=4, num_stages=1) buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf70 = buf69 del buf69 triton_poi_fused_convolution_2[grid(294912)](buf70, primals_49, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_49 buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf71 buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf72, primals_51, primals_50, buf73, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf75 = buf74 del buf74 triton_poi_fused_convolution_2[grid(294912)](buf75, primals_52, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_52 buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf76 buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf77, primals_54, primals_53, buf78, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf79 = extern_kernels.convolution(buf75, 
buf78, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf66, primals_46, buf79, primals_55, buf80, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf81 buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_6[grid(18)](buf82, primals_57, primals_56, buf83, 18, 18, XBLOCK=32, num_warps=8, num_stages=1) buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf85 = buf84 del buf84 triton_poi_fused_add_convolution_7[grid(294912)](buf85, primals_58, primals_1, 294912, XBLOCK=1024, num_warps=4, num_stages=1) del primals_58 buf86 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_60, buf86, 18, XBLOCK=32, num_warps=1, num_stages=1) buf87 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_60, primals_59, buf86, buf87, 108, XBLOCK=128, num_warps=4, num_stages=1) buf88 = extern_kernels.convolution(buf85, buf87, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf88, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf89 = buf88 del buf88 triton_poi_fused_convolution_2[grid(294912)](buf89, primals_61, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_61 buf90 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. 
float32) buf91 = reinterpret_tensor(buf90, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf90 buf92 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf91, primals_63, primals_62, buf92, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf93 = extern_kernels.convolution(buf89, buf92, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf93, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf94 = buf93 del buf93 triton_poi_fused_convolution_2[grid(294912)](buf94, primals_64, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_64 buf95 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf96 = reinterpret_tensor(buf95, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf95 buf97 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32) triton_per_fused__weight_norm_interface_4[grid(9)](buf96, primals_66, primals_65, buf97, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf98 = extern_kernels.convolution(buf94, buf97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf98, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf99 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_69, buf99, 18, XBLOCK=32, num_warps=1, num_stages=1) buf100 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_69, primals_68, buf99, buf100, 108, XBLOCK=128, num_warps=4, num_stages=1) buf101 = extern_kernels.convolution(buf85, buf100, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf101, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf102 = buf101 del buf101 triton_poi_fused_convolution_2[grid(294912)](buf102, primals_70, 294912, XBLOCK=512, num_warps=8, num_stages=1) 
del primals_70 buf103 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf104 = reinterpret_tensor(buf103, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf103 buf105 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf104, primals_72, primals_71, buf105, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf106 = extern_kernels.convolution(buf102, buf105, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf106, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf107 = buf106 del buf106 triton_poi_fused_convolution_2[grid(294912)](buf107, primals_73, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_73 buf108 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf109 = reinterpret_tensor(buf108, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf108 buf110 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf109, primals_75, primals_74, buf110, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf111 = extern_kernels.convolution(buf107, buf110, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf111, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf112 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf98, primals_67, buf111, primals_76, buf112, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf113 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_78, buf113, 18, XBLOCK=32, num_warps=1, num_stages=1) buf114 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_78, primals_77, buf113, buf114, 108, XBLOCK=128, num_warps=4, num_stages=1) buf115 = extern_kernels.convolution(buf112, 
buf114, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf115, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf116 = buf115 del buf115 triton_poi_fused_convolution_2[grid(294912)](buf116, primals_79, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_79 buf117 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf118 = reinterpret_tensor(buf117, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf117 buf119 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf118, primals_81, primals_80, buf119, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf120 = extern_kernels.convolution(buf116, buf119, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf120, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf121 = buf120 del buf120 triton_poi_fused_convolution_2[grid(294912)](buf121, primals_82, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_82 buf122 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf123 = reinterpret_tensor(buf122, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf122 buf124 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf123, primals_84, primals_83, buf124, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf125 = extern_kernels.convolution(buf121, buf124, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf125, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf126 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_87, buf126, 18, XBLOCK=32, num_warps=1, num_stages=1) buf127 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_87, 
primals_86, buf126, buf127, 108, XBLOCK=128, num_warps=4, num_stages=1) buf128 = extern_kernels.convolution(buf112, buf127, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf128, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf129 = buf128 del buf128 triton_poi_fused_convolution_2[grid(294912)](buf129, primals_88, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_88 buf130 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf131 = reinterpret_tensor(buf130, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf130 buf132 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf131, primals_90, primals_89, buf132, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf133 = extern_kernels.convolution(buf129, buf132, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf133, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf134 = buf133 del buf133 triton_poi_fused_convolution_2[grid(294912)](buf134, primals_91, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_91 buf135 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf136 = reinterpret_tensor(buf135, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf135 buf137 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf136, primals_93, primals_92, buf137, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf138 = extern_kernels.convolution(buf134, buf137, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf138, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf139 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf125, primals_85, buf138, primals_94, buf139, 294912, XBLOCK=512, num_warps=8, num_stages=1) 
buf140 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_96, buf140, 18, XBLOCK=32, num_warps=1, num_stages=1) buf141 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_96, primals_95, buf140, buf141, 108, XBLOCK=128, num_warps=4, num_stages=1) buf142 = extern_kernels.convolution(buf139, buf141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf142, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf143 = buf142 del buf142 triton_poi_fused_convolution_2[grid(294912)](buf143, primals_97, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_97 buf144 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf145 = reinterpret_tensor(buf144, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf144 buf146 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf145, primals_99, primals_98, buf146, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf147 = extern_kernels.convolution(buf143, buf146, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf147, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf148 = buf147 del buf147 triton_poi_fused_convolution_2[grid(294912)](buf148, primals_100, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_100 buf149 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf150 = reinterpret_tensor(buf149, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf149 buf151 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf150, primals_102, primals_101, buf151, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf152 = extern_kernels.convolution(buf148, buf151, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf152, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf153 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_105, buf153, 18, XBLOCK=32, num_warps=1, num_stages=1) buf154 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_105, primals_104, buf153, buf154, 108, XBLOCK=128, num_warps=4, num_stages=1) buf155 = extern_kernels.convolution(buf139, buf154, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf155, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf156 = buf155 del buf155 triton_poi_fused_convolution_2[grid(294912)](buf156, primals_106, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_106 buf157 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf158 = reinterpret_tensor(buf157, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf157 buf159 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf158, primals_108, primals_107, buf159, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf160 = extern_kernels.convolution(buf156, buf159, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf160, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf161 = buf160 del buf160 triton_poi_fused_convolution_2[grid(294912)](buf161, primals_109, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_109 buf162 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf163 = reinterpret_tensor(buf162, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf162 buf164 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf163, primals_111, primals_110, buf164, 9, 18, XBLOCK=1, 
num_warps=2, num_stages=1) buf165 = extern_kernels.convolution(buf161, buf164, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf165, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf166 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf152, primals_103, buf165, primals_112, buf166, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf167 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf168 = reinterpret_tensor(buf167, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf167 buf169 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch. float32) triton_per_fused__weight_norm_interface_6[grid(18)](buf168, primals_114, primals_113, buf169, 18, 18, XBLOCK=32, num_warps= 8, num_stages=1) buf170 = extern_kernels.convolution(buf166, buf169, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf170, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf171 = buf170 del buf170 triton_poi_fused_add_convolution_7[grid(294912)](buf171, primals_115, buf85, 294912, XBLOCK=1024, num_warps=4, num_stages=1) del primals_115 buf172 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_117, buf172, 18, XBLOCK=32, num_warps=1, num_stages=1) buf173 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_117, primals_116, buf172, buf173, 108, XBLOCK=128, num_warps=4, num_stages=1) buf174 = extern_kernels.convolution(buf171, buf173, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf174, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf175 = buf174 del buf174 triton_poi_fused_convolution_2[grid(294912)](buf175, primals_118, 294912, XBLOCK=512, num_warps=8, 
num_stages=1) del primals_118 buf176 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf177 = reinterpret_tensor(buf176, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf176 buf178 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf177, primals_120, primals_119, buf178, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf179 = extern_kernels.convolution(buf175, buf178, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf179, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf180 = buf179 del buf179 triton_poi_fused_convolution_2[grid(294912)](buf180, primals_121, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_121 buf181 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf182 = reinterpret_tensor(buf181, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf181 buf183 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf182, primals_123, primals_122, buf183, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf184 = extern_kernels.convolution(buf180, buf183, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf184, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf185 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_126, buf185, 18, XBLOCK=32, num_warps=1, num_stages=1) buf186 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_126, primals_125, buf185, buf186, 108, XBLOCK=128, num_warps=4, num_stages=1) buf187 = extern_kernels.convolution(buf171, buf186, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf187, (4, 18, 64, 64), (73728, 4096, 64, 1)) 
buf188 = buf187 del buf187 triton_poi_fused_convolution_2[grid(294912)](buf188, primals_127, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_127 buf189 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf190 = reinterpret_tensor(buf189, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf189 buf191 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf190, primals_129, primals_128, buf191, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf192 = extern_kernels.convolution(buf188, buf191, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf192, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf193 = buf192 del buf192 triton_poi_fused_convolution_2[grid(294912)](buf193, primals_130, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_130 buf194 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf195 = reinterpret_tensor(buf194, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf194 buf196 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf195, primals_132, primals_131, buf196, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf197 = extern_kernels.convolution(buf193, buf196, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf197, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf198 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf184, primals_124, buf197, primals_133, buf198, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf199 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_135, buf199, 18, XBLOCK=32, num_warps=1, num_stages=1) buf200 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_135, primals_134, buf199, buf200, 108, XBLOCK=128, num_warps=4, num_stages=1) buf201 = extern_kernels.convolution(buf198, buf200, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf201, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf202 = buf201 del buf201 triton_poi_fused_convolution_2[grid(294912)](buf202, primals_136, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_136 buf203 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf204 = reinterpret_tensor(buf203, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf203 buf205 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf204, primals_138, primals_137, buf205, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf206 = extern_kernels.convolution(buf202, buf205, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf206, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf207 = buf206 del buf206 triton_poi_fused_convolution_2[grid(294912)](buf207, primals_139, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_139 buf208 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf209 = reinterpret_tensor(buf208, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf208 buf210 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf209, primals_141, primals_140, buf210, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf211 = extern_kernels.convolution(buf207, buf210, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf211, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf212 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_144, 
buf212, 18, XBLOCK=32, num_warps=1, num_stages=1) buf213 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_144, primals_143, buf212, buf213, 108, XBLOCK=128, num_warps=4, num_stages=1) buf214 = extern_kernels.convolution(buf198, buf213, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf214, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf215 = buf214 del buf214 triton_poi_fused_convolution_2[grid(294912)](buf215, primals_145, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_145 buf216 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf217 = reinterpret_tensor(buf216, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf216 buf218 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf217, primals_147, primals_146, buf218, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf219 = extern_kernels.convolution(buf215, buf218, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf219, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf220 = buf219 del buf219 triton_poi_fused_convolution_2[grid(294912)](buf220, primals_148, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_148 buf221 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf222 = reinterpret_tensor(buf221, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf221 buf223 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf222, primals_150, primals_149, buf223, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf224 = extern_kernels.convolution(buf220, buf223, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf224, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf225 = 
empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf211, primals_142, buf224, primals_151, buf225, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf226 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_153, buf226, 18, XBLOCK=32, num_warps=1, num_stages=1) buf227 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_153, primals_152, buf226, buf227, 108, XBLOCK=128, num_warps=4, num_stages=1) buf228 = extern_kernels.convolution(buf225, buf227, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf228, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf229 = buf228 del buf228 triton_poi_fused_convolution_2[grid(294912)](buf229, primals_154, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_154 buf230 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. 
float32) buf231 = reinterpret_tensor(buf230, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf230 buf232 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf231, primals_156, primals_155, buf232, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf233 = extern_kernels.convolution(buf229, buf232, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf233, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf234 = buf233 del buf233 triton_poi_fused_convolution_2[grid(294912)](buf234, primals_157, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_157 buf235 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf236 = reinterpret_tensor(buf235, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf235 buf237 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf236, primals_159, primals_158, buf237, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf238 = extern_kernels.convolution(buf234, buf237, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf238, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf239 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_162, buf239, 18, XBLOCK=32, num_warps=1, num_stages=1) buf240 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_162, primals_161, buf239, buf240, 108, XBLOCK=128, num_warps=4, num_stages=1) buf241 = extern_kernels.convolution(buf225, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf241, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf242 = buf241 del buf241 triton_poi_fused_convolution_2[grid(294912)](buf242, primals_163, 294912, 
XBLOCK=512, num_warps=8, num_stages=1) del primals_163 buf243 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf244 = reinterpret_tensor(buf243, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf243 buf245 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf244, primals_165, primals_164, buf245, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf246 = extern_kernels.convolution(buf242, buf245, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf246, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf247 = buf246 del buf246 triton_poi_fused_convolution_2[grid(294912)](buf247, primals_166, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_166 buf248 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf249 = reinterpret_tensor(buf248, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf248 buf250 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf249, primals_168, primals_167, buf250, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf251 = extern_kernels.convolution(buf247, buf250, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf251, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf252 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf238, primals_160, buf251, primals_169, buf252, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf253 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf254 = reinterpret_tensor(buf253, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf253 buf255 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch. 
float32) triton_per_fused__weight_norm_interface_6[grid(18)](buf254, primals_171, primals_170, buf255, 18, 18, XBLOCK=32, num_warps= 8, num_stages=1) buf256 = extern_kernels.convolution(buf252, buf255, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf256, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf257 = buf256 del buf256 triton_poi_fused_add_convolution_7[grid(294912)](buf257, primals_172, buf171, 294912, XBLOCK=1024, num_warps=4, num_stages=1 ) del primals_172 buf258 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_174, buf258, 18, XBLOCK=32, num_warps=1, num_stages=1) buf259 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_174, primals_173, buf258, buf259, 108, XBLOCK=128, num_warps=4, num_stages=1) buf260 = extern_kernels.convolution(buf257, buf259, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf260, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf261 = buf260 del buf260 triton_poi_fused_convolution_2[grid(294912)](buf261, primals_175, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_175 buf262 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. 
float32) buf263 = reinterpret_tensor(buf262, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf262 buf264 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf263, primals_177, primals_176, buf264, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf265 = extern_kernels.convolution(buf261, buf264, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf265, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf266 = buf265 del buf265 triton_poi_fused_convolution_2[grid(294912)](buf266, primals_178, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_178 buf267 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf268 = reinterpret_tensor(buf267, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf267 buf269 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf268, primals_180, primals_179, buf269, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf270 = extern_kernels.convolution(buf266, buf269, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf270, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf271 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_183, buf271, 18, XBLOCK=32, num_warps=1, num_stages=1) buf272 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_183, primals_182, buf271, buf272, 108, XBLOCK=128, num_warps=4, num_stages=1) buf273 = extern_kernels.convolution(buf257, buf272, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf273, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf274 = buf273 del buf273 triton_poi_fused_convolution_2[grid(294912)](buf274, primals_184, 294912, 
XBLOCK=512, num_warps=8, num_stages=1) del primals_184 buf275 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf276 = reinterpret_tensor(buf275, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf275 buf277 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf276, primals_186, primals_185, buf277, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf278 = extern_kernels.convolution(buf274, buf277, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf278, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf279 = buf278 del buf278 triton_poi_fused_convolution_2[grid(294912)](buf279, primals_187, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_187 buf280 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf281 = reinterpret_tensor(buf280, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf280 buf282 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf281, primals_189, primals_188, buf282, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf283 = extern_kernels.convolution(buf279, buf282, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf283, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf284 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf270, primals_181, buf283, primals_190, buf284, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf285 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_192, buf285, 18, XBLOCK=32, num_warps=1, num_stages=1) buf286 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_192, primals_191, buf285, buf286, 108, XBLOCK=128, num_warps=4, 
num_stages=1) buf287 = extern_kernels.convolution(buf284, buf286, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf287, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf288 = buf287 del buf287 triton_poi_fused_convolution_2[grid(294912)](buf288, primals_193, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_193 buf289 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf290 = reinterpret_tensor(buf289, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf289 buf291 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf290, primals_195, primals_194, buf291, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf292 = extern_kernels.convolution(buf288, buf291, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf292, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf293 = buf292 del buf292 triton_poi_fused_convolution_2[grid(294912)](buf293, primals_196, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_196 buf294 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf295 = reinterpret_tensor(buf294, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf294 buf296 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf295, primals_198, primals_197, buf296, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf297 = extern_kernels.convolution(buf293, buf296, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf297, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf298 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_201, buf298, 18, XBLOCK=32, num_warps=1, num_stages=1) buf299 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_201, primals_200, buf298, buf299, 108, XBLOCK=128, num_warps=4, num_stages=1) buf300 = extern_kernels.convolution(buf284, buf299, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf300, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf301 = buf300 del buf300 triton_poi_fused_convolution_2[grid(294912)](buf301, primals_202, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_202 buf302 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf303 = reinterpret_tensor(buf302, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf302 buf304 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf303, primals_204, primals_203, buf304, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf305 = extern_kernels.convolution(buf301, buf304, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf305, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf306 = buf305 del buf305 triton_poi_fused_convolution_2[grid(294912)](buf306, primals_205, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_205 buf307 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf308 = reinterpret_tensor(buf307, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf307 buf309 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf308, primals_207, primals_206, buf309, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf310 = extern_kernels.convolution(buf306, buf309, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf310, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf311 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf297, 
primals_199, buf310, primals_208, buf311, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf312 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_210, buf312, 18, XBLOCK=32, num_warps=1, num_stages=1) buf313 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_210, primals_209, buf312, buf313, 108, XBLOCK=128, num_warps=4, num_stages=1) buf314 = extern_kernels.convolution(buf311, buf313, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf314, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf315 = buf314 del buf314 triton_poi_fused_convolution_2[grid(294912)](buf315, primals_211, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_211 buf316 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf317 = reinterpret_tensor(buf316, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf316 buf318 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf317, primals_213, primals_212, buf318, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf319 = extern_kernels.convolution(buf315, buf318, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf319, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf320 = buf319 del buf319 triton_poi_fused_convolution_2[grid(294912)](buf320, primals_214, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_214 buf321 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf322 = reinterpret_tensor(buf321, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf321 buf323 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf322, primals_216, primals_215, buf323, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf324 = 
extern_kernels.convolution(buf320, buf323, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf324, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf325 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_219, buf325, 18, XBLOCK=32, num_warps=1, num_stages=1) buf326 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_219, primals_218, buf325, buf326, 108, XBLOCK=128, num_warps=4, num_stages=1) buf327 = extern_kernels.convolution(buf311, buf326, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf327, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf328 = buf327 del buf327 triton_poi_fused_convolution_2[grid(294912)](buf328, primals_220, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_220 buf329 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. 
float32) buf330 = reinterpret_tensor(buf329, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf329 buf331 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf330, primals_222, primals_221, buf331, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf332 = extern_kernels.convolution(buf328, buf331, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf332, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf333 = buf332 del buf332 triton_poi_fused_convolution_2[grid(294912)](buf333, primals_223, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_223 buf334 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf335 = reinterpret_tensor(buf334, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf334 buf336 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf335, primals_225, primals_224, buf336, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf337 = extern_kernels.convolution(buf333, buf336, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf337, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf338 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf324, primals_217, buf337, primals_226, buf338, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf339 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf340 = reinterpret_tensor(buf339, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf339 buf341 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch. 
float32) triton_per_fused__weight_norm_interface_6[grid(18)](buf340, primals_228, primals_227, buf341, 18, 18, XBLOCK=32, num_warps= 8, num_stages=1) buf342 = extern_kernels.convolution(buf338, buf341, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf342, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf343 = buf342 del buf342 triton_poi_fused_add_convolution_7[grid(294912)](buf343, primals_229, buf257, 294912, XBLOCK=1024, num_warps=4, num_stages=1 ) del primals_229 buf344 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_231, buf344, 18, XBLOCK=32, num_warps=1, num_stages=1) buf345 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_231, primals_230, buf344, buf345, 108, XBLOCK=128, num_warps=4, num_stages=1) buf346 = extern_kernels.convolution(buf343, buf345, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf346, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf347 = buf346 del buf346 triton_poi_fused_convolution_2[grid(294912)](buf347, primals_232, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_232 buf348 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. 
float32) buf349 = reinterpret_tensor(buf348, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf348 buf350 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf349, primals_234, primals_233, buf350, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf351 = extern_kernels.convolution(buf347, buf350, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf351, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf352 = buf351 del buf351 triton_poi_fused_convolution_2[grid(294912)](buf352, primals_235, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_235 buf353 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf354 = reinterpret_tensor(buf353, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf353 buf355 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf354, primals_237, primals_236, buf355, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf356 = extern_kernels.convolution(buf352, buf355, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf356, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf357 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_240, buf357, 18, XBLOCK=32, num_warps=1, num_stages=1) buf358 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_240, primals_239, buf357, buf358, 108, XBLOCK=128, num_warps=4, num_stages=1) buf359 = extern_kernels.convolution(buf343, buf358, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf359, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf360 = buf359 del buf359 triton_poi_fused_convolution_2[grid(294912)](buf360, primals_241, 294912, 
XBLOCK=512, num_warps=8, num_stages=1) del primals_241 buf361 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf362 = reinterpret_tensor(buf361, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf361 buf363 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf362, primals_243, primals_242, buf363, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf364 = extern_kernels.convolution(buf360, buf363, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf364, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf365 = buf364 del buf364 triton_poi_fused_convolution_2[grid(294912)](buf365, primals_244, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_244 buf366 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf367 = reinterpret_tensor(buf366, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf366 buf368 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf367, primals_246, primals_245, buf368, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf369 = extern_kernels.convolution(buf365, buf368, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf369, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf370 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf356, primals_238, buf369, primals_247, buf370, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf371 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_249, buf371, 18, XBLOCK=32, num_warps=1, num_stages=1) buf372 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_249, primals_248, buf371, buf372, 108, XBLOCK=128, num_warps=4, 
num_stages=1) buf373 = extern_kernels.convolution(buf370, buf372, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf373, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf374 = buf373 del buf373 triton_poi_fused_convolution_2[grid(294912)](buf374, primals_250, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_250 buf375 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf376 = reinterpret_tensor(buf375, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf375 buf377 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf376, primals_252, primals_251, buf377, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf378 = extern_kernels.convolution(buf374, buf377, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf378, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf379 = buf378 del buf378 triton_poi_fused_convolution_2[grid(294912)](buf379, primals_253, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_253 buf380 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf381 = reinterpret_tensor(buf380, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf380 buf382 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf381, primals_255, primals_254, buf382, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf383 = extern_kernels.convolution(buf379, buf382, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf383, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf384 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_258, buf384, 18, XBLOCK=32, num_warps=1, num_stages=1) buf385 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_258, primals_257, buf384, buf385, 108, XBLOCK=128, num_warps=4, num_stages=1) buf386 = extern_kernels.convolution(buf370, buf385, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf386, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf387 = buf386 del buf386 triton_poi_fused_convolution_2[grid(294912)](buf387, primals_259, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_259 buf388 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf389 = reinterpret_tensor(buf388, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf388 buf390 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf389, primals_261, primals_260, buf390, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf391 = extern_kernels.convolution(buf387, buf390, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf391, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf392 = buf391 del buf391 triton_poi_fused_convolution_2[grid(294912)](buf392, primals_262, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_262 buf393 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf394 = reinterpret_tensor(buf393, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf393 buf395 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf394, primals_264, primals_263, buf395, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf396 = extern_kernels.convolution(buf392, buf395, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf396, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf397 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf383, 
primals_256, buf396, primals_265, buf397, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf398 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_267, buf398, 18, XBLOCK=32, num_warps=1, num_stages=1) buf399 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_267, primals_266, buf398, buf399, 108, XBLOCK=128, num_warps=4, num_stages=1) buf400 = extern_kernels.convolution(buf397, buf399, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf400, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf401 = buf400 del buf400 triton_poi_fused_convolution_2[grid(294912)](buf401, primals_268, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_268 buf402 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf403 = reinterpret_tensor(buf402, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf402 buf404 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf403, primals_270, primals_269, buf404, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf405 = extern_kernels.convolution(buf401, buf404, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf405, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf406 = buf405 del buf405 triton_poi_fused_convolution_2[grid(294912)](buf406, primals_271, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_271 buf407 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf408 = reinterpret_tensor(buf407, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf407 buf409 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf408, primals_273, primals_272, buf409, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf410 = 
extern_kernels.convolution(buf406, buf409, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf410, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf411 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_276, buf411, 18, XBLOCK=32, num_warps=1, num_stages=1) buf412 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_276, primals_275, buf411, buf412, 108, XBLOCK=128, num_warps=4, num_stages=1) buf413 = extern_kernels.convolution(buf397, buf412, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf413, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf414 = buf413 del buf413 triton_poi_fused_convolution_2[grid(294912)](buf414, primals_277, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_277 buf415 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. 
float32) buf416 = reinterpret_tensor(buf415, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf415 buf417 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf416, primals_279, primals_278, buf417, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf418 = extern_kernels.convolution(buf414, buf417, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf418, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf419 = buf418 del buf418 triton_poi_fused_convolution_2[grid(294912)](buf419, primals_280, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_280 buf420 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf421 = reinterpret_tensor(buf420, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf420 buf422 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf421, primals_282, primals_281, buf422, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf423 = extern_kernels.convolution(buf419, buf422, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf423, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf424 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf410, primals_274, buf423, primals_283, buf424, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf425 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf426 = reinterpret_tensor(buf425, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf425 buf427 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch. 
float32) triton_per_fused__weight_norm_interface_6[grid(18)](buf426, primals_285, primals_284, buf427, 18, 18, XBLOCK=32, num_warps= 8, num_stages=1) buf428 = extern_kernels.convolution(buf424, buf427, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf428, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf429 = buf428 del buf428 triton_poi_fused_add_convolution_7[grid(294912)](buf429, primals_286, buf343, 294912, XBLOCK=1024, num_warps=4, num_stages=1 ) del primals_286 buf430 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_288, buf430, 18, XBLOCK=32, num_warps=1, num_stages=1) buf431 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_288, primals_287, buf430, buf431, 108, XBLOCK=128, num_warps=4, num_stages=1) buf432 = extern_kernels.convolution(buf429, buf431, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf432, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf433 = buf432 del buf432 triton_poi_fused_convolution_2[grid(294912)](buf433, primals_289, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_289 buf434 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. 
float32) buf435 = reinterpret_tensor(buf434, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf434 buf436 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf435, primals_291, primals_290, buf436, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf437 = extern_kernels.convolution(buf433, buf436, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf437, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf438 = buf437 del buf437 triton_poi_fused_convolution_2[grid(294912)](buf438, primals_292, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_292 buf439 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf440 = reinterpret_tensor(buf439, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf439 buf441 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf440, primals_294, primals_293, buf441, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf442 = extern_kernels.convolution(buf438, buf441, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf442, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf443 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_297, buf443, 18, XBLOCK=32, num_warps=1, num_stages=1) buf444 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_297, primals_296, buf443, buf444, 108, XBLOCK=128, num_warps=4, num_stages=1) buf445 = extern_kernels.convolution(buf429, buf444, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf445, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf446 = buf445 del buf445 triton_poi_fused_convolution_2[grid(294912)](buf446, primals_298, 294912, 
XBLOCK=512, num_warps=8, num_stages=1) del primals_298 buf447 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf448 = reinterpret_tensor(buf447, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf447 buf449 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf448, primals_300, primals_299, buf449, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf450 = extern_kernels.convolution(buf446, buf449, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf450, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf451 = buf450 del buf450 triton_poi_fused_convolution_2[grid(294912)](buf451, primals_301, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_301 buf452 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf453 = reinterpret_tensor(buf452, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf452 buf454 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf453, primals_303, primals_302, buf454, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf455 = extern_kernels.convolution(buf451, buf454, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf455, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf456 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf442, primals_295, buf455, primals_304, buf456, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf457 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_306, buf457, 18, XBLOCK=32, num_warps=1, num_stages=1) buf458 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_306, primals_305, buf457, buf458, 108, XBLOCK=128, num_warps=4, 
num_stages=1) buf459 = extern_kernels.convolution(buf456, buf458, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf459, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf460 = buf459 del buf459 triton_poi_fused_convolution_2[grid(294912)](buf460, primals_307, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_307 buf461 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf462 = reinterpret_tensor(buf461, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf461 buf463 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf462, primals_309, primals_308, buf463, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf464 = extern_kernels.convolution(buf460, buf463, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf464, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf465 = buf464 del buf464 triton_poi_fused_convolution_2[grid(294912)](buf465, primals_310, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_310 buf466 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf467 = reinterpret_tensor(buf466, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf466 buf468 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf467, primals_312, primals_311, buf468, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf469 = extern_kernels.convolution(buf465, buf468, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf469, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf470 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_315, buf470, 18, XBLOCK=32, num_warps=1, num_stages=1) buf471 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_315, primals_314, buf470, buf471, 108, XBLOCK=128, num_warps=4, num_stages=1) buf472 = extern_kernels.convolution(buf456, buf471, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf472, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf473 = buf472 del buf472 triton_poi_fused_convolution_2[grid(294912)](buf473, primals_316, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_316 buf474 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf475 = reinterpret_tensor(buf474, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf474 buf476 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf475, primals_318, primals_317, buf476, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf477 = extern_kernels.convolution(buf473, buf476, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf477, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf478 = buf477 del buf477 triton_poi_fused_convolution_2[grid(294912)](buf478, primals_319, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_319 buf479 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf480 = reinterpret_tensor(buf479, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf479 buf481 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf480, primals_321, primals_320, buf481, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf482 = extern_kernels.convolution(buf478, buf481, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf482, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf483 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf469, 
primals_313, buf482, primals_322, buf483, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf484 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_324, buf484, 18, XBLOCK=32, num_warps=1, num_stages=1) buf485 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_324, primals_323, buf484, buf485, 108, XBLOCK=128, num_warps=4, num_stages=1) buf486 = extern_kernels.convolution(buf483, buf485, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf486, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf487 = buf486 del buf486 triton_poi_fused_convolution_2[grid(294912)](buf487, primals_325, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_325 buf488 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf489 = reinterpret_tensor(buf488, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf488 buf490 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf489, primals_327, primals_326, buf490, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf491 = extern_kernels.convolution(buf487, buf490, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf491, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf492 = buf491 del buf491 triton_poi_fused_convolution_2[grid(294912)](buf492, primals_328, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_328 buf493 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf494 = reinterpret_tensor(buf493, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf493 buf495 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf494, primals_330, primals_329, buf495, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf496 = 
extern_kernels.convolution(buf492, buf495, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf496, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf497 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_0[grid(18)](primals_333, buf497, 18, XBLOCK=32, num_warps=1, num_stages=1) buf498 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(108)](primals_333, primals_332, buf497, buf498, 108, XBLOCK=128, num_warps=4, num_stages=1) buf499 = extern_kernels.convolution(buf483, buf498, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf499, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf500 = buf499 del buf499 triton_poi_fused_convolution_2[grid(294912)](buf500, primals_334, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_334 buf501 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. 
float32) buf502 = reinterpret_tensor(buf501, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf501 buf503 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_3[grid(18)](buf502, primals_336, primals_335, buf503, 18, 9, XBLOCK=32, num_warps=4, num_stages=1) buf504 = extern_kernels.convolution(buf500, buf503, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None) assert_size_stride(buf504, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf505 = buf504 del buf504 triton_poi_fused_convolution_2[grid(294912)](buf505, primals_337, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_337 buf506 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32) buf507 = reinterpret_tensor(buf506, (9, 1, 1, 1), (1, 1, 1, 1), 0) del buf506 buf508 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32 ) triton_per_fused__weight_norm_interface_4[grid(9)](buf507, primals_339, primals_338, buf508, 9, 18, XBLOCK=1, num_warps=2, num_stages=1) buf509 = extern_kernels.convolution(buf505, buf508, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf509, (4, 9, 64, 64), (36864, 4096, 64, 1)) buf510 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32) triton_poi_fused_cat_5[grid(294912)](buf496, primals_331, buf509, primals_340, buf510, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf511 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch. float32) buf512 = reinterpret_tensor(buf511, (18, 1, 1, 1), (1, 1, 1, 1), 0) del buf511 buf513 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch. 
float32) triton_per_fused__weight_norm_interface_6[grid(18)](buf512, primals_342, primals_341, buf513, 18, 18, XBLOCK=32, num_warps= 8, num_stages=1) buf514 = extern_kernels.convolution(buf510, buf513, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf514, (4, 18, 64, 64), (73728, 4096, 64, 1)) buf515 = buf514 del buf514 triton_poi_fused_add_convolution_8[grid(294912)](buf515, primals_343, buf429, primals_1, 294912, XBLOCK=512, num_warps=8, num_stages=1) del primals_343 buf516 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf509, primals_340, buf516, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf509 del primals_340 buf517 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf496, primals_331, buf517, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf496 del primals_331 buf518 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf482, primals_322, buf518, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf482 del primals_322 buf519 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf469, primals_313, buf519, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf469 del primals_313 buf520 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf455, primals_304, buf520, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf455 del primals_304 buf521 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 
147456)](buf442, primals_295, buf521, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf442 del primals_295 buf522 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf423, primals_283, buf522, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf423 del primals_283 buf523 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf410, primals_274, buf523, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf410 del primals_274 buf524 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf396, primals_265, buf524, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf396 del primals_265 buf525 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf383, primals_256, buf525, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf383 del primals_256 buf526 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf369, primals_247, buf526, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf369 del primals_247 buf527 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf356, primals_238, buf527, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf356 del primals_238 buf528 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf337, primals_226, buf528, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf337 del primals_226 buf529 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), 
torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf324, primals_217, buf529, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf324 del primals_217 buf530 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf310, primals_208, buf530, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf310 del primals_208 buf531 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf297, primals_199, buf531, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf297 del primals_199 buf532 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf283, primals_190, buf532, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf283 del primals_190 buf533 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf270, primals_181, buf533, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf270 del primals_181 buf534 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf251, primals_169, buf534, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf251 del primals_169 buf535 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf238, primals_160, buf535, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf238 del primals_160 buf536 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf224, primals_151, buf536, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf224 del 
primals_151 buf537 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf211, primals_142, buf537, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf211 del primals_142 buf538 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf197, primals_133, buf538, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf197 del primals_133 buf539 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf184, primals_124, buf539, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf184 del primals_124 buf540 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf165, primals_112, buf540, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf165 del primals_112 buf541 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf152, primals_103, buf541, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf152 del primals_103 buf542 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf138, primals_94, buf542, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf138 del primals_94 buf543 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf125, primals_85, buf543, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf125 del primals_85 buf544 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf111, primals_76, 
buf544, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf111 del primals_76 buf545 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf98, primals_67, buf545, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf98 del primals_67 buf546 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf79, primals_55, buf546, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf79 del primals_55 buf547 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf66, primals_46, buf547, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf66 del primals_46 buf548 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf52, primals_37, buf548, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf52 del primals_37 buf549 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf39, primals_28, buf549, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf39 del primals_28 buf550 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf25, primals_19, buf550, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf25 del primals_19 buf551 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid( 147456)](buf12, primals_10, buf551, 147456, XBLOCK=1024, num_warps=4, num_stages=1) del buf12 del primals_10 return (buf515, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33, buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73, 
buf78, buf83, buf87, buf92, buf97, buf100, buf105, buf110, buf114, buf119, buf124, buf127, buf132, buf137, buf141, buf146, buf151, buf154, buf159, buf164, buf169, buf173, buf178, buf183, buf186, buf191, buf196, buf200, buf205, buf210, buf213, buf218, buf223, buf227, buf232, buf237, buf240, buf245, buf250, buf255, buf259, buf264, buf269, buf272, buf277, buf282, buf286, buf291, buf296, buf299, buf304, buf309, buf313, buf318, buf323, buf326, buf331, buf336, buf341, buf345, buf350, buf355, buf358, buf363, buf368, buf372, buf377, buf382, buf385, buf390, buf395, buf399, buf404, buf409, buf412, buf417, buf422, buf427, buf431, buf436, buf441, buf444, buf449, buf454, buf458, buf463, buf468, buf471, buf476, buf481, buf485, buf490, buf495, buf498, buf503, buf508, buf513, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_54, primals_56, primals_57, primals_59, primals_60, primals_62, primals_63, primals_65, primals_66, primals_68, primals_69, primals_71, primals_72, primals_74, primals_75, primals_77, primals_78, primals_80, primals_81, primals_83, primals_84, primals_86, primals_87, primals_89, primals_90, primals_92, primals_93, primals_95, primals_96, primals_98, primals_99, primals_101, primals_102, primals_104, primals_105, primals_107, primals_108, primals_110, primals_111, primals_113, primals_114, primals_116, primals_117, primals_119, primals_120, primals_122, primals_123, primals_125, primals_126, primals_128, primals_129, primals_131, primals_132, primals_134, primals_135, primals_137, primals_138, primals_140, primals_141, primals_143, primals_144, primals_146, primals_147, primals_149, 
primals_150, primals_152, primals_153, primals_155, primals_156, primals_158, primals_159, primals_161, primals_162, primals_164, primals_165, primals_167, primals_168, primals_170, primals_171, primals_173, primals_174, primals_176, primals_177, primals_179, primals_180, primals_182, primals_183, primals_185, primals_186, primals_188, primals_189, primals_191, primals_192, primals_194, primals_195, primals_197, primals_198, primals_200, primals_201, primals_203, primals_204, primals_206, primals_207, primals_209, primals_210, primals_212, primals_213, primals_215, primals_216, primals_218, primals_219, primals_221, primals_222, primals_224, primals_225, primals_227, primals_228, primals_230, primals_231, primals_233, primals_234, primals_236, primals_237, primals_239, primals_240, primals_242, primals_243, primals_245, primals_246, primals_248, primals_249, primals_251, primals_252, primals_254, primals_255, primals_257, primals_258, primals_260, primals_261, primals_263, primals_264, primals_266, primals_267, primals_269, primals_270, primals_272, primals_273, primals_275, primals_276, primals_278, primals_279, primals_281, primals_282, primals_284, primals_285, primals_287, primals_288, primals_290, primals_291, primals_293, primals_294, primals_296, primals_297, primals_299, primals_300, primals_302, primals_303, primals_305, primals_306, primals_308, primals_309, primals_311, primals_312, primals_314, primals_315, primals_317, primals_318, primals_320, primals_321, primals_323, primals_324, primals_326, primals_327, primals_329, primals_330, primals_332, primals_333, primals_335, primals_336, primals_338, primals_339, primals_341, primals_342, buf0, buf1, buf3, buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65, buf67, buf68, buf70, buf72, buf73, 
buf75, buf77, buf78, buf80, buf82, buf83, buf85, buf86, buf87, buf89, buf91, buf92, buf94, buf96, buf97, buf99, buf100, buf102, buf104, buf105, buf107, buf109, buf110, buf112, buf113, buf114, buf116, buf118, buf119, buf121, buf123, buf124, buf126, buf127, buf129, buf131, buf132, buf134, buf136, buf137, buf139, buf140, buf141, buf143, buf145, buf146, buf148, buf150, buf151, buf153, buf154, buf156, buf158, buf159, buf161, buf163, buf164, buf166, buf168, buf169, buf171, buf172, buf173, buf175, buf177, buf178, buf180, buf182, buf183, buf185, buf186, buf188, buf190, buf191, buf193, buf195, buf196, buf198, buf199, buf200, buf202, buf204, buf205, buf207, buf209, buf210, buf212, buf213, buf215, buf217, buf218, buf220, buf222, buf223, buf225, buf226, buf227, buf229, buf231, buf232, buf234, buf236, buf237, buf239, buf240, buf242, buf244, buf245, buf247, buf249, buf250, buf252, buf254, buf255, buf257, buf258, buf259, buf261, buf263, buf264, buf266, buf268, buf269, buf271, buf272, buf274, buf276, buf277, buf279, buf281, buf282, buf284, buf285, buf286, buf288, buf290, buf291, buf293, buf295, buf296, buf298, buf299, buf301, buf303, buf304, buf306, buf308, buf309, buf311, buf312, buf313, buf315, buf317, buf318, buf320, buf322, buf323, buf325, buf326, buf328, buf330, buf331, buf333, buf335, buf336, buf338, buf340, buf341, buf343, buf344, buf345, buf347, buf349, buf350, buf352, buf354, buf355, buf357, buf358, buf360, buf362, buf363, buf365, buf367, buf368, buf370, buf371, buf372, buf374, buf376, buf377, buf379, buf381, buf382, buf384, buf385, buf387, buf389, buf390, buf392, buf394, buf395, buf397, buf398, buf399, buf401, buf403, buf404, buf406, buf408, buf409, buf411, buf412, buf414, buf416, buf417, buf419, buf421, buf422, buf424, buf426, buf427, buf429, buf430, buf431, buf433, buf435, buf436, buf438, buf440, buf441, buf443, buf444, buf446, buf448, buf449, buf451, buf453, buf454, buf456, buf457, buf458, buf460, buf462, buf463, buf465, buf467, buf468, buf470, buf471, buf473, buf475, 
buf476, buf478, buf480, buf481, buf483, buf484, buf485, buf487, buf489, buf490, buf492, buf494, buf495, buf497, buf498, buf500, buf502, buf503, buf505, buf507, buf508, buf510, buf512, buf513, buf516, buf517, buf518, buf519, buf520, buf521, buf522, buf523, buf524, buf525, buf526, buf527, buf528, buf529, buf530, buf531, buf532, buf533, buf534, buf535, buf536, buf537, buf538, buf539, buf540, buf541, buf542, buf543, buf544, buf545, buf546, buf547, buf548, buf549, buf550, buf551) class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, groups=3): super(ConvBlock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=1, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class ConvBlockD(nn.Module): def __init__(self, in_channels, out_channels, groups=3, ker_size=2): super(ConvBlockD, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.groups = groups def wn(x): return torch.nn.utils.weight_norm(x) self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 1, groups=self.groups)) self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels, 3, padding=ker_size, dilation=ker_size, groups=in_channels)) self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels, 1, groups=1)) def forward(self, x): x = self.group_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class MIRB1(nn.Module): def __init__(self, args): super(MIRB1, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = 
ConvBlock(args.n_feats, self.c_out) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlock(args.n_feats, self.c_out) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlock(args.n_feats, self.c_out) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): res = x c1_1 = self.lrelu(self.conv3_1(res)) c2_1 = self.lrelu(self.convd_1(res)) c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1))) c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1))) c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1))) c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1))) out = self.conv_last(torch.cat([c1_4, c2_4], 1)) out = out + x return out class MIRB2(nn.Module): def __init__(self, args): super(MIRB2, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=2) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): res = x c1_1 = self.lrelu(self.conv3_1(res)) c2_1 = self.lrelu(self.convd_1(res)) c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1))) c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1))) c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1))) c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1))) out = self.conv_last(torch.cat([c1_4, c2_4], 1)) out = out + x return out class MIRB3(nn.Module): def __init__(self, args): super(MIRB3, self).__init__() self.c_out = args.n_feats // 2 def wn(x): return torch.nn.utils.weight_norm(x) self.conv3_1 = 
ConvBlock(args.n_feats, self.c_out) self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv3_2 = ConvBlock(args.n_feats, self.c_out) self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv3_3 = ConvBlock(args.n_feats, self.c_out) self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=3) self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): res = x c1_1 = self.lrelu(self.conv3_1(res)) c2_1 = self.lrelu(self.convd_1(res)) c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1))) c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1))) c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1))) c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1))) out = self.conv_last(torch.cat([c1_4, c2_4], 1)) out = out + x return out class MMFBNew(nn.Module): def __init__(self, args): super(MMFBNew, self).__init__() def wn(x): return torch.nn.utils.weight_norm(x) self.bs1 = MIRB1(args) self.bs11 = MIRB1(args) self.bs2 = MIRB2(args) self.bs22 = MIRB2(args) self.bs3 = MIRB3(args) self.bs33 = MIRB3(args) def forward(self, input_0): primals_4 = self.bs1.conv3_1.group_conv.bias primals_2 = self.bs1.conv3_1.group_conv.weight_g primals_3 = self.bs1.conv3_1.group_conv.weight_v primals_7 = self.bs1.conv3_1.depth_conv.bias primals_5 = self.bs1.conv3_1.depth_conv.weight_g primals_6 = self.bs1.conv3_1.depth_conv.weight_v primals_10 = self.bs1.conv3_1.point_conv.bias primals_8 = self.bs1.conv3_1.point_conv.weight_g primals_9 = self.bs1.conv3_1.point_conv.weight_v primals_13 = self.bs1.convd_1.group_conv.bias primals_11 = self.bs1.convd_1.group_conv.weight_g primals_12 = self.bs1.convd_1.group_conv.weight_v primals_16 = self.bs1.convd_1.depth_conv.bias primals_14 = self.bs1.convd_1.depth_conv.weight_g primals_15 = self.bs1.convd_1.depth_conv.weight_v primals_19 = self.bs1.convd_1.point_conv.bias primals_17 = self.bs1.convd_1.point_conv.weight_g primals_18 
= self.bs1.convd_1.point_conv.weight_v primals_22 = self.bs1.conv3_2.group_conv.bias primals_20 = self.bs1.conv3_2.group_conv.weight_g primals_21 = self.bs1.conv3_2.group_conv.weight_v primals_25 = self.bs1.conv3_2.depth_conv.bias primals_23 = self.bs1.conv3_2.depth_conv.weight_g primals_24 = self.bs1.conv3_2.depth_conv.weight_v primals_28 = self.bs1.conv3_2.point_conv.bias primals_26 = self.bs1.conv3_2.point_conv.weight_g primals_27 = self.bs1.conv3_2.point_conv.weight_v primals_31 = self.bs1.convd_2.group_conv.bias primals_29 = self.bs1.convd_2.group_conv.weight_g primals_30 = self.bs1.convd_2.group_conv.weight_v primals_34 = self.bs1.convd_2.depth_conv.bias primals_32 = self.bs1.convd_2.depth_conv.weight_g primals_33 = self.bs1.convd_2.depth_conv.weight_v primals_37 = self.bs1.convd_2.point_conv.bias primals_35 = self.bs1.convd_2.point_conv.weight_g primals_36 = self.bs1.convd_2.point_conv.weight_v primals_40 = self.bs1.conv3_3.group_conv.bias primals_38 = self.bs1.conv3_3.group_conv.weight_g primals_39 = self.bs1.conv3_3.group_conv.weight_v primals_43 = self.bs1.conv3_3.depth_conv.bias primals_41 = self.bs1.conv3_3.depth_conv.weight_g primals_42 = self.bs1.conv3_3.depth_conv.weight_v primals_46 = self.bs1.conv3_3.point_conv.bias primals_44 = self.bs1.conv3_3.point_conv.weight_g primals_45 = self.bs1.conv3_3.point_conv.weight_v primals_49 = self.bs1.convd_3.group_conv.bias primals_47 = self.bs1.convd_3.group_conv.weight_g primals_48 = self.bs1.convd_3.group_conv.weight_v primals_52 = self.bs1.convd_3.depth_conv.bias primals_50 = self.bs1.convd_3.depth_conv.weight_g primals_51 = self.bs1.convd_3.depth_conv.weight_v primals_55 = self.bs1.convd_3.point_conv.bias primals_53 = self.bs1.convd_3.point_conv.weight_g primals_54 = self.bs1.convd_3.point_conv.weight_v primals_58 = self.bs1.conv_last.bias primals_56 = self.bs1.conv_last.weight_g primals_57 = self.bs1.conv_last.weight_v primals_61 = self.bs11.conv3_1.group_conv.bias primals_59 = 
self.bs11.conv3_1.group_conv.weight_g primals_60 = self.bs11.conv3_1.group_conv.weight_v primals_64 = self.bs11.conv3_1.depth_conv.bias primals_62 = self.bs11.conv3_1.depth_conv.weight_g primals_63 = self.bs11.conv3_1.depth_conv.weight_v primals_67 = self.bs11.conv3_1.point_conv.bias primals_65 = self.bs11.conv3_1.point_conv.weight_g primals_66 = self.bs11.conv3_1.point_conv.weight_v primals_70 = self.bs11.convd_1.group_conv.bias primals_68 = self.bs11.convd_1.group_conv.weight_g primals_69 = self.bs11.convd_1.group_conv.weight_v primals_73 = self.bs11.convd_1.depth_conv.bias primals_71 = self.bs11.convd_1.depth_conv.weight_g primals_72 = self.bs11.convd_1.depth_conv.weight_v primals_76 = self.bs11.convd_1.point_conv.bias primals_74 = self.bs11.convd_1.point_conv.weight_g primals_75 = self.bs11.convd_1.point_conv.weight_v primals_79 = self.bs11.conv3_2.group_conv.bias primals_77 = self.bs11.conv3_2.group_conv.weight_g primals_78 = self.bs11.conv3_2.group_conv.weight_v primals_82 = self.bs11.conv3_2.depth_conv.bias primals_80 = self.bs11.conv3_2.depth_conv.weight_g primals_81 = self.bs11.conv3_2.depth_conv.weight_v primals_85 = self.bs11.conv3_2.point_conv.bias primals_83 = self.bs11.conv3_2.point_conv.weight_g primals_84 = self.bs11.conv3_2.point_conv.weight_v primals_88 = self.bs11.convd_2.group_conv.bias primals_86 = self.bs11.convd_2.group_conv.weight_g primals_87 = self.bs11.convd_2.group_conv.weight_v primals_91 = self.bs11.convd_2.depth_conv.bias primals_89 = self.bs11.convd_2.depth_conv.weight_g primals_90 = self.bs11.convd_2.depth_conv.weight_v primals_94 = self.bs11.convd_2.point_conv.bias primals_92 = self.bs11.convd_2.point_conv.weight_g primals_93 = self.bs11.convd_2.point_conv.weight_v primals_97 = self.bs11.conv3_3.group_conv.bias primals_95 = self.bs11.conv3_3.group_conv.weight_g primals_96 = self.bs11.conv3_3.group_conv.weight_v primals_100 = self.bs11.conv3_3.depth_conv.bias primals_98 = self.bs11.conv3_3.depth_conv.weight_g primals_99 = 
self.bs11.conv3_3.depth_conv.weight_v primals_103 = self.bs11.conv3_3.point_conv.bias primals_101 = self.bs11.conv3_3.point_conv.weight_g primals_102 = self.bs11.conv3_3.point_conv.weight_v primals_106 = self.bs11.convd_3.group_conv.bias primals_104 = self.bs11.convd_3.group_conv.weight_g primals_105 = self.bs11.convd_3.group_conv.weight_v primals_109 = self.bs11.convd_3.depth_conv.bias primals_107 = self.bs11.convd_3.depth_conv.weight_g primals_108 = self.bs11.convd_3.depth_conv.weight_v primals_112 = self.bs11.convd_3.point_conv.bias primals_110 = self.bs11.convd_3.point_conv.weight_g primals_111 = self.bs11.convd_3.point_conv.weight_v primals_115 = self.bs11.conv_last.bias primals_113 = self.bs11.conv_last.weight_g primals_114 = self.bs11.conv_last.weight_v primals_118 = self.bs2.conv3_1.group_conv.bias primals_116 = self.bs2.conv3_1.group_conv.weight_g primals_117 = self.bs2.conv3_1.group_conv.weight_v primals_121 = self.bs2.conv3_1.depth_conv.bias primals_119 = self.bs2.conv3_1.depth_conv.weight_g primals_120 = self.bs2.conv3_1.depth_conv.weight_v primals_124 = self.bs2.conv3_1.point_conv.bias primals_122 = self.bs2.conv3_1.point_conv.weight_g primals_123 = self.bs2.conv3_1.point_conv.weight_v primals_127 = self.bs2.convd_1.group_conv.bias primals_125 = self.bs2.convd_1.group_conv.weight_g primals_126 = self.bs2.convd_1.group_conv.weight_v primals_130 = self.bs2.convd_1.depth_conv.bias primals_128 = self.bs2.convd_1.depth_conv.weight_g primals_129 = self.bs2.convd_1.depth_conv.weight_v primals_133 = self.bs2.convd_1.point_conv.bias primals_131 = self.bs2.convd_1.point_conv.weight_g primals_132 = self.bs2.convd_1.point_conv.weight_v primals_136 = self.bs2.conv3_2.group_conv.bias primals_134 = self.bs2.conv3_2.group_conv.weight_g primals_135 = self.bs2.conv3_2.group_conv.weight_v primals_139 = self.bs2.conv3_2.depth_conv.bias primals_137 = self.bs2.conv3_2.depth_conv.weight_g primals_138 = self.bs2.conv3_2.depth_conv.weight_v primals_142 = 
self.bs2.conv3_2.point_conv.bias primals_140 = self.bs2.conv3_2.point_conv.weight_g primals_141 = self.bs2.conv3_2.point_conv.weight_v primals_145 = self.bs2.convd_2.group_conv.bias primals_143 = self.bs2.convd_2.group_conv.weight_g primals_144 = self.bs2.convd_2.group_conv.weight_v primals_148 = self.bs2.convd_2.depth_conv.bias primals_146 = self.bs2.convd_2.depth_conv.weight_g primals_147 = self.bs2.convd_2.depth_conv.weight_v primals_151 = self.bs2.convd_2.point_conv.bias primals_149 = self.bs2.convd_2.point_conv.weight_g primals_150 = self.bs2.convd_2.point_conv.weight_v primals_154 = self.bs2.conv3_3.group_conv.bias primals_152 = self.bs2.conv3_3.group_conv.weight_g primals_153 = self.bs2.conv3_3.group_conv.weight_v primals_157 = self.bs2.conv3_3.depth_conv.bias primals_155 = self.bs2.conv3_3.depth_conv.weight_g primals_156 = self.bs2.conv3_3.depth_conv.weight_v primals_160 = self.bs2.conv3_3.point_conv.bias primals_158 = self.bs2.conv3_3.point_conv.weight_g primals_159 = self.bs2.conv3_3.point_conv.weight_v primals_163 = self.bs2.convd_3.group_conv.bias primals_161 = self.bs2.convd_3.group_conv.weight_g primals_162 = self.bs2.convd_3.group_conv.weight_v primals_166 = self.bs2.convd_3.depth_conv.bias primals_164 = self.bs2.convd_3.depth_conv.weight_g primals_165 = self.bs2.convd_3.depth_conv.weight_v primals_169 = self.bs2.convd_3.point_conv.bias primals_167 = self.bs2.convd_3.point_conv.weight_g primals_168 = self.bs2.convd_3.point_conv.weight_v primals_172 = self.bs2.conv_last.bias primals_170 = self.bs2.conv_last.weight_g primals_171 = self.bs2.conv_last.weight_v primals_175 = self.bs22.conv3_1.group_conv.bias primals_173 = self.bs22.conv3_1.group_conv.weight_g primals_174 = self.bs22.conv3_1.group_conv.weight_v primals_178 = self.bs22.conv3_1.depth_conv.bias primals_176 = self.bs22.conv3_1.depth_conv.weight_g primals_177 = self.bs22.conv3_1.depth_conv.weight_v primals_181 = self.bs22.conv3_1.point_conv.bias primals_179 = 
self.bs22.conv3_1.point_conv.weight_g primals_180 = self.bs22.conv3_1.point_conv.weight_v primals_184 = self.bs22.convd_1.group_conv.bias primals_182 = self.bs22.convd_1.group_conv.weight_g primals_183 = self.bs22.convd_1.group_conv.weight_v primals_187 = self.bs22.convd_1.depth_conv.bias primals_185 = self.bs22.convd_1.depth_conv.weight_g primals_186 = self.bs22.convd_1.depth_conv.weight_v primals_190 = self.bs22.convd_1.point_conv.bias primals_188 = self.bs22.convd_1.point_conv.weight_g primals_189 = self.bs22.convd_1.point_conv.weight_v primals_193 = self.bs22.conv3_2.group_conv.bias primals_191 = self.bs22.conv3_2.group_conv.weight_g primals_192 = self.bs22.conv3_2.group_conv.weight_v primals_196 = self.bs22.conv3_2.depth_conv.bias primals_194 = self.bs22.conv3_2.depth_conv.weight_g primals_195 = self.bs22.conv3_2.depth_conv.weight_v primals_199 = self.bs22.conv3_2.point_conv.bias primals_197 = self.bs22.conv3_2.point_conv.weight_g primals_198 = self.bs22.conv3_2.point_conv.weight_v primals_202 = self.bs22.convd_2.group_conv.bias primals_200 = self.bs22.convd_2.group_conv.weight_g primals_201 = self.bs22.convd_2.group_conv.weight_v primals_205 = self.bs22.convd_2.depth_conv.bias primals_203 = self.bs22.convd_2.depth_conv.weight_g primals_204 = self.bs22.convd_2.depth_conv.weight_v primals_208 = self.bs22.convd_2.point_conv.bias primals_206 = self.bs22.convd_2.point_conv.weight_g primals_207 = self.bs22.convd_2.point_conv.weight_v primals_211 = self.bs22.conv3_3.group_conv.bias primals_209 = self.bs22.conv3_3.group_conv.weight_g primals_210 = self.bs22.conv3_3.group_conv.weight_v primals_214 = self.bs22.conv3_3.depth_conv.bias primals_212 = self.bs22.conv3_3.depth_conv.weight_g primals_213 = self.bs22.conv3_3.depth_conv.weight_v primals_217 = self.bs22.conv3_3.point_conv.bias primals_215 = self.bs22.conv3_3.point_conv.weight_g primals_216 = self.bs22.conv3_3.point_conv.weight_v primals_220 = self.bs22.convd_3.group_conv.bias primals_218 = 
self.bs22.convd_3.group_conv.weight_g primals_219 = self.bs22.convd_3.group_conv.weight_v primals_223 = self.bs22.convd_3.depth_conv.bias primals_221 = self.bs22.convd_3.depth_conv.weight_g primals_222 = self.bs22.convd_3.depth_conv.weight_v primals_226 = self.bs22.convd_3.point_conv.bias primals_224 = self.bs22.convd_3.point_conv.weight_g primals_225 = self.bs22.convd_3.point_conv.weight_v primals_229 = self.bs22.conv_last.bias primals_227 = self.bs22.conv_last.weight_g primals_228 = self.bs22.conv_last.weight_v primals_232 = self.bs3.conv3_1.group_conv.bias primals_230 = self.bs3.conv3_1.group_conv.weight_g primals_231 = self.bs3.conv3_1.group_conv.weight_v primals_235 = self.bs3.conv3_1.depth_conv.bias primals_233 = self.bs3.conv3_1.depth_conv.weight_g primals_234 = self.bs3.conv3_1.depth_conv.weight_v primals_238 = self.bs3.conv3_1.point_conv.bias primals_236 = self.bs3.conv3_1.point_conv.weight_g primals_237 = self.bs3.conv3_1.point_conv.weight_v primals_241 = self.bs3.convd_1.group_conv.bias primals_239 = self.bs3.convd_1.group_conv.weight_g primals_240 = self.bs3.convd_1.group_conv.weight_v primals_244 = self.bs3.convd_1.depth_conv.bias primals_242 = self.bs3.convd_1.depth_conv.weight_g primals_243 = self.bs3.convd_1.depth_conv.weight_v primals_247 = self.bs3.convd_1.point_conv.bias primals_245 = self.bs3.convd_1.point_conv.weight_g primals_246 = self.bs3.convd_1.point_conv.weight_v primals_250 = self.bs3.conv3_2.group_conv.bias primals_248 = self.bs3.conv3_2.group_conv.weight_g primals_249 = self.bs3.conv3_2.group_conv.weight_v primals_253 = self.bs3.conv3_2.depth_conv.bias primals_251 = self.bs3.conv3_2.depth_conv.weight_g primals_252 = self.bs3.conv3_2.depth_conv.weight_v primals_256 = self.bs3.conv3_2.point_conv.bias primals_254 = self.bs3.conv3_2.point_conv.weight_g primals_255 = self.bs3.conv3_2.point_conv.weight_v primals_259 = self.bs3.convd_2.group_conv.bias primals_257 = self.bs3.convd_2.group_conv.weight_g primals_258 = 
self.bs3.convd_2.group_conv.weight_v primals_262 = self.bs3.convd_2.depth_conv.bias primals_260 = self.bs3.convd_2.depth_conv.weight_g primals_261 = self.bs3.convd_2.depth_conv.weight_v primals_265 = self.bs3.convd_2.point_conv.bias primals_263 = self.bs3.convd_2.point_conv.weight_g primals_264 = self.bs3.convd_2.point_conv.weight_v primals_268 = self.bs3.conv3_3.group_conv.bias primals_266 = self.bs3.conv3_3.group_conv.weight_g primals_267 = self.bs3.conv3_3.group_conv.weight_v primals_271 = self.bs3.conv3_3.depth_conv.bias primals_269 = self.bs3.conv3_3.depth_conv.weight_g primals_270 = self.bs3.conv3_3.depth_conv.weight_v primals_274 = self.bs3.conv3_3.point_conv.bias primals_272 = self.bs3.conv3_3.point_conv.weight_g primals_273 = self.bs3.conv3_3.point_conv.weight_v primals_277 = self.bs3.convd_3.group_conv.bias primals_275 = self.bs3.convd_3.group_conv.weight_g primals_276 = self.bs3.convd_3.group_conv.weight_v primals_280 = self.bs3.convd_3.depth_conv.bias primals_278 = self.bs3.convd_3.depth_conv.weight_g primals_279 = self.bs3.convd_3.depth_conv.weight_v primals_283 = self.bs3.convd_3.point_conv.bias primals_281 = self.bs3.convd_3.point_conv.weight_g primals_282 = self.bs3.convd_3.point_conv.weight_v primals_286 = self.bs3.conv_last.bias primals_284 = self.bs3.conv_last.weight_g primals_285 = self.bs3.conv_last.weight_v primals_289 = self.bs33.conv3_1.group_conv.bias primals_287 = self.bs33.conv3_1.group_conv.weight_g primals_288 = self.bs33.conv3_1.group_conv.weight_v primals_292 = self.bs33.conv3_1.depth_conv.bias primals_290 = self.bs33.conv3_1.depth_conv.weight_g primals_291 = self.bs33.conv3_1.depth_conv.weight_v primals_295 = self.bs33.conv3_1.point_conv.bias primals_293 = self.bs33.conv3_1.point_conv.weight_g primals_294 = self.bs33.conv3_1.point_conv.weight_v primals_298 = self.bs33.convd_1.group_conv.bias primals_296 = self.bs33.convd_1.group_conv.weight_g primals_297 = self.bs33.convd_1.group_conv.weight_v primals_301 = 
self.bs33.convd_1.depth_conv.bias primals_299 = self.bs33.convd_1.depth_conv.weight_g primals_300 = self.bs33.convd_1.depth_conv.weight_v primals_304 = self.bs33.convd_1.point_conv.bias primals_302 = self.bs33.convd_1.point_conv.weight_g primals_303 = self.bs33.convd_1.point_conv.weight_v primals_307 = self.bs33.conv3_2.group_conv.bias primals_305 = self.bs33.conv3_2.group_conv.weight_g primals_306 = self.bs33.conv3_2.group_conv.weight_v primals_310 = self.bs33.conv3_2.depth_conv.bias primals_308 = self.bs33.conv3_2.depth_conv.weight_g primals_309 = self.bs33.conv3_2.depth_conv.weight_v primals_313 = self.bs33.conv3_2.point_conv.bias primals_311 = self.bs33.conv3_2.point_conv.weight_g primals_312 = self.bs33.conv3_2.point_conv.weight_v primals_316 = self.bs33.convd_2.group_conv.bias primals_314 = self.bs33.convd_2.group_conv.weight_g primals_315 = self.bs33.convd_2.group_conv.weight_v primals_319 = self.bs33.convd_2.depth_conv.bias primals_317 = self.bs33.convd_2.depth_conv.weight_g primals_318 = self.bs33.convd_2.depth_conv.weight_v primals_322 = self.bs33.convd_2.point_conv.bias primals_320 = self.bs33.convd_2.point_conv.weight_g primals_321 = self.bs33.convd_2.point_conv.weight_v primals_325 = self.bs33.conv3_3.group_conv.bias primals_323 = self.bs33.conv3_3.group_conv.weight_g primals_324 = self.bs33.conv3_3.group_conv.weight_v primals_328 = self.bs33.conv3_3.depth_conv.bias primals_326 = self.bs33.conv3_3.depth_conv.weight_g primals_327 = self.bs33.conv3_3.depth_conv.weight_v primals_331 = self.bs33.conv3_3.point_conv.bias primals_329 = self.bs33.conv3_3.point_conv.weight_g primals_330 = self.bs33.conv3_3.point_conv.weight_v primals_334 = self.bs33.convd_3.group_conv.bias primals_332 = self.bs33.convd_3.group_conv.weight_g primals_333 = self.bs33.convd_3.group_conv.weight_v primals_337 = self.bs33.convd_3.depth_conv.bias primals_335 = self.bs33.convd_3.depth_conv.weight_g primals_336 = self.bs33.convd_3.depth_conv.weight_v primals_340 = 
self.bs33.convd_3.point_conv.bias primals_338 = self.bs33.convd_3.point_conv.weight_g primals_339 = self.bs33.convd_3.point_conv.weight_v primals_343 = self.bs33.conv_last.bias primals_341 = self.bs33.conv_last.weight_g primals_342 = self.bs33.conv_last.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, 
primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185, primals_186, primals_187, primals_188, primals_189, primals_190, primals_191, primals_192, primals_193, primals_194, primals_195, primals_196, primals_197, primals_198, primals_199, primals_200, primals_201, primals_202, primals_203, primals_204, primals_205, primals_206, primals_207, primals_208, primals_209, primals_210, primals_211, primals_212, primals_213, primals_214, primals_215, primals_216, primals_217, primals_218, primals_219, primals_220, primals_221, primals_222, primals_223, primals_224, primals_225, primals_226, primals_227, primals_228, primals_229, primals_230, primals_231, primals_232, primals_233, primals_234, primals_235, primals_236, primals_237, primals_238, primals_239, primals_240, primals_241, primals_242, primals_243, primals_244, primals_245, primals_246, primals_247, primals_248, primals_249, primals_250, primals_251, primals_252, primals_253, primals_254, primals_255, primals_256, primals_257, primals_258, primals_259, primals_260, primals_261, primals_262, primals_263, primals_264, primals_265, primals_266, primals_267, primals_268, primals_269, primals_270, primals_271, primals_272, primals_273, primals_274, primals_275, primals_276, primals_277, primals_278, primals_279, primals_280, primals_281, primals_282, primals_283, primals_284, primals_285, primals_286, primals_287, primals_288, primals_289, primals_290, primals_291, primals_292, 
primals_293, primals_294, primals_295, primals_296, primals_297, primals_298, primals_299, primals_300, primals_301, primals_302, primals_303, primals_304, primals_305, primals_306, primals_307, primals_308, primals_309, primals_310, primals_311, primals_312, primals_313, primals_314, primals_315, primals_316, primals_317, primals_318, primals_319, primals_320, primals_321, primals_322, primals_323, primals_324, primals_325, primals_326, primals_327, primals_328, primals_329, primals_330, primals_331, primals_332, primals_333, primals_334, primals_335, primals_336, primals_337, primals_338, primals_339, primals_340, primals_341, primals_342, primals_343]) return output[0]
wwjfsfs/wwjyyds
MMFB
false
13,231
[ "MIT" ]
0
80cd6267fde7cd98838078a0d5178a557ceb7414
https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414
Net
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim


class Net(nn.Module):
    """Four-stage conv/pool CNN followed by two fully connected layers.

    Stages double the channel count (3 -> 32 -> 64 -> 128 -> 256); each
    stage is a 3x3 same-padding convolution, ReLU, and 2x2 max-pool.
    The classifier head maps the flattened features through a 4096-unit
    hidden layer to 133 output logits, with dropout (p=0.25) applied
    before each linear layer.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Attribute names are part of the state_dict contract; keep them.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32,
                               kernel_size=3, padding=1)
        self.max2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64,
                               kernel_size=3, padding=1)
        self.max4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5 = nn.Conv2d(in_channels=64, out_channels=128,
                               kernel_size=3, padding=1)
        self.max6 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv7 = nn.Conv2d(in_channels=128, out_channels=256,
                               kernel_size=3, padding=1)
        self.max8 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc9 = nn.Linear(256 * 14 * 14, 4096)
        self.fc10 = nn.Linear(4096, 133)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        """Run the conv/pool stages, flatten, and classify.

        NOTE(review): the flatten uses a fixed feature size of
        256*14*14; for the 121x121 inputs produced by get_inputs() the
        spatial size after four stride-2 pools is 7x7, so view(-1, ...)
        folds the batch dimension as well — presumably intentional for
        this dataset entry; confirm against the paired Triton code.
        """
        stages = (
            (self.conv1, self.max2),
            (self.conv3, self.max4),
            (self.conv5, self.max6),
            (self.conv7, self.max8),
        )
        for conv, pool in stages:
            x = pool(F.relu(conv(x)))
        x = self.dropout(x.view(-1, 256 * 14 * 14))
        x = self.dropout(F.relu(self.fc9(x)))
        return self.fc10(x)


def get_inputs():
    # One random image batch: 4 samples, 3 channels, 121x121 pixels.
    return [torch.rand([4, 3, 121, 121])]


def get_init_inputs():
    # Net() takes no constructor arguments.
    return [[], {}]
# Inductor-generated CUDA implementation of Net: channels-last weight/input
# repacking kernels (fused_0..4), fused bias+ReLU epilogues, fused
# max_pool2d_with_indices kernels, and a `call` schedule that mirrors
# Net.forward().  Generated code — do not hand-tune the kernel bodies.
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Repack conv1 weight (32, 3, 3, 3) into channels-last layout.
    ynumel = 96
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Repack the (4, 3, 121, 121) input batch into channels-last layout.
    ynumel = 12
    xnumel = 14641
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 14641 * y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 43923 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Repack conv3 weight (64, 32, 3, 3) into channels-last layout.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 32
    y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Repack conv5 weight (128, 64, 3, 3) into channels-last layout.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Repack conv7 weight (256, 128, 3, 3) into channels-last layout.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias add + ReLU epilogue after conv1 (32 channels).
    xnumel = 1874048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # 2x2/stride-2 max pool over the conv1 activations; also emits argmax
    # indices (int8 position 0..3 within each window) for backward.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32 % 60
    x2 = xindex // 1920 % 60
    x3 = xindex // 115200
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
    tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
    tmp3 = tl.load(in_ptr0 + (3872 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
    tmp5 = tl.load(in_ptr0 + (3904 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x4, tmp6, None)
    tl.store(out_ptr1 + x4, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias add + ReLU epilogue after conv3 (64 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # 2x2/stride-2 max pool (+ indices) over the conv3 activations.
    xnumel = 230400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x1 = xindex // 64 % 30
    x2 = xindex // 1920
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 7680 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (3840 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (3904 + x0 + 128 * x1 + 7680 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias add + ReLU epilogue after conv5 (128 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # 2x2/stride-2 max pool (+ indices) over the conv5 activations.
    xnumel = 115200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 128
    x1 = xindex // 128 % 15
    x2 = xindex // 1920
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 7680 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 7680 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (3840 + x0 + 256 * x1 + 7680 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (3968 + x0 + 256 * x1 + 7680 * x2), xmask)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias add + ReLU epilogue after conv7 (256 channels).
    xnumel = 230400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Final 2x2/stride-2 max pool; values are written back to a contiguous
    # layout (ready for the FC matmul) while indices stay channels-last.
    ynumel = 196
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x3 = xindex
    y0 = yindex % 7
    y1 = yindex // 7 % 7
    y2 = yindex // 49
    y4 = yindex
    y5 = yindex % 49
    tmp0 = tl.load(in_ptr0 + (x3 + 512 * y0 + 7680 * y1 + 57600 * y2), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (256 + x3 + 512 * y0 + 7680 * y1 + 57600 * y2), xmask & ymask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (3840 + x3 + 512 * y0 + 7680 * y1 + 57600 * y2), xmask & ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (4096 + x3 + 512 * y0 + 7680 * y1 + 57600 * y2), xmask & ymask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1, 1], 1, tl.int8)
    tmp4 = tl.full([1, 1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1, 1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1, 1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + (x3 + 256 * y4), tmp15, xmask & ymask)
    tl.store(out_ptr1 + (y5 + 49 * x3 + 12544 * y2), tmp16, xmask & ymask)


@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias add + ReLU for the fc9 output (4096 features).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x0, tmp4, None)


def call(args):
    """Compiled forward schedule for Net.

    :param args: [conv1.w, conv1.b, input, conv3.w, conv3.b, conv5.w,
                  conv5.b, conv7.w, conv7.b, fc9.w, fc9.b, fc10.w, fc10.b]
                  (the list is cleared in place to release references)
    :return: tuple whose first element is the (1, 133) logits buffer;
             the remaining entries are saved tensors for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
     primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 3, 121, 121), (43923, 14641, 121, 1))
    assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (4096, 50176), (50176, 1))
    assert_size_stride(primals_11, (4096,), (1,))
    assert_size_stride(primals_12, (133, 4096), (4096, 1))
    assert_size_stride(primals_13, (133,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Repack weights and input into channels-last layouts.
        buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(96, 9)](primals_1, buf0, 96, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 121, 121), (43923, 1, 363, 3), torch.float32)
        triton_poi_fused_1[grid(12, 14641)](primals_3, buf1, 12, 14641, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
        triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
        triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
        triton_poi_fused_4[grid(32768, 9)](primals_8, buf4, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        # Stage 1: conv1 + ReLU + pool.
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 32, 121, 121), (468512, 1, 3872, 32))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_5[grid(1874048)](buf6, primals_2, 1874048, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        buf7 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32), torch.float32)
        buf8 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_6[grid(460800)](buf6, buf7, buf8, 460800, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 2: conv3 + ReLU + pool.
        buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 64, 60, 60), (230400, 1, 3840, 64))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_relu_7[grid(921600)](buf10, primals_5, 921600, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf11 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.float32)
        buf12 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf10, buf11, buf12, 230400, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 3: conv5 + ReLU + pool.
        buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 128, 30, 30), (115200, 1, 3840, 128))
        buf14 = buf13
        del buf13
        triton_poi_fused_convolution_relu_9[grid(460800)](buf14, primals_7, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf15 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128), torch.float32)
        buf16 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_10[grid(115200)](buf14, buf15, buf16, 115200, XBLOCK=512, num_warps=8, num_stages=1)
        # Stage 4: conv7 + ReLU + pool (pool also re-lays-out for the matmul).
        buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 256, 15, 15), (57600, 1, 3840, 256))
        buf18 = buf17
        del buf17
        triton_poi_fused_convolution_relu_11[grid(230400)](buf18, primals_9, 230400, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf19 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256), torch.int8)
        buf20 = empty_strided_cuda((4, 256, 7, 7), (12544, 49, 7, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_12[grid(196, 256)](buf18, buf19, buf20, 196, 256, XBLOCK=256, YBLOCK=2, num_warps=4, num_stages=1)
        # Classifier: fc9 (+ReLU) then fc10.  Note buf20 is viewed as a
        # single (1, 50176) row, mirroring Net.forward()'s view(-1, 256*14*14).
        buf21 = empty_strided_cuda((1, 4096), (4096, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf20, (1, 50176), (0, 1), 0), reinterpret_tensor(primals_10, (50176, 4096), (1, 50176), 0), out=buf21)
        buf22 = buf21
        del buf21
        triton_poi_fused_relu_13[grid(4096)](buf22, primals_11, 4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
        buf23 = empty_strided_cuda((1, 133), (133, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(primals_12, (4096, 133), (1, 4096), 0), alpha=1, beta=1, out=buf23)
        del primals_13
    return (buf23, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
            buf11, buf12, buf14, buf15, buf16, buf18, buf19,
            reinterpret_tensor(buf20, (1, 50176), (50176, 1), 0), buf22,
            primals_12, primals_10)


class NetNew(nn.Module):
    """Drop-in replacement for Net whose forward runs the compiled `call`."""

    def __init__(self):
        super(NetNew, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1)
        self.max2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
        self.max4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        self.max6 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv7 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1)
        self.max8 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc9 = nn.Linear(256 * 14 * 14, 4096)
        self.fc10 = nn.Linear(4096, 133)
        self.dropout = nn.Dropout(0.25)

    def forward(self, input_0):
        # Gather parameters in the positional order `call` expects.
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv3.weight
        primals_5 = self.conv3.bias
        primals_6 = self.conv5.weight
        primals_7 = self.conv5.bias
        primals_8 = self.conv7.weight
        primals_9 = self.conv7.bias
        primals_10 = self.fc9.weight
        primals_11 = self.fc9.bias
        primals_12 = self.fc10.weight
        primals_13 = self.fc10.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
                       primals_5, primals_6, primals_7, primals_8,
                       primals_9, primals_10, primals_11, primals_12,
                       primals_13])
        return output[0]
kawano8811/deep-learning-v2-pytorch
Net
false
13,232
[ "MIT" ]
0
b7c453728cb85edf3b30e0aeb66b3861747bc043
https://github.com/kawano8811/deep-learning-v2-pytorch/tree/b7c453728cb85edf3b30e0aeb66b3861747bc043
VGGBase
import torch
import torchvision
import torch.nn.functional as F
from torch import nn
import torch.optim
import torch.utils.data


def decimate(tensor, m):
    """
    Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.

    This is used when we convert FC layers to equivalent Convolutional layers,
    BUT of a smaller size.

    :param tensor: tensor to be decimated
    :param m: list of decimation factors for each dimension of the tensor;
              None if not to be decimated along a dimension
    :return: decimated tensor
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long())
    return tensor


class VGGBase(nn.Module):
    """
    VGG base convolutions to produce lower-level feature maps.
    """

    def __init__(self):
        super(VGGBase, self).__init__()
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        # ceil_mode=True keeps odd spatial sizes from losing a row/column.
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        # stride-1 pool retains the conv5 resolution for the dilated conv6.
        self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
        self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
        # NOTE: downloads ImageNet-pretrained VGG-16 weights on construction.
        self.load_pretrained_layers()

    def forward(self, image):
        """
        Forward propagation.

        :param image: images, a tensor of dimensions (N, 3, 512, 512)
        :return: lower-level feature maps conv4_3 and conv7
        """
        out = F.relu(self.conv1_1(image))
        out = F.relu(self.conv1_2(out))
        out = self.pool1(out)
        out = F.relu(self.conv2_1(out))
        out = F.relu(self.conv2_2(out))
        out = self.pool2(out)
        out = F.relu(self.conv3_1(out))
        out = F.relu(self.conv3_2(out))
        out = F.relu(self.conv3_3(out))
        out = self.pool3(out)
        out = F.relu(self.conv4_1(out))
        out = F.relu(self.conv4_2(out))
        out = F.relu(self.conv4_3(out))
        conv4_3_feats = out
        out = self.pool4(out)
        out = F.relu(self.conv5_1(out))
        out = F.relu(self.conv5_2(out))
        out = F.relu(self.conv5_3(out))
        out = self.pool5(out)
        out = F.relu(self.conv6(out))
        conv7_feats = F.relu(self.conv7(out))
        return conv4_3_feats, conv7_feats

    def load_pretrained_layers(self):
        """
        As in the paper, we use a VGG-16 pretrained on the ImageNet task as the
        base network.  There's one available in PyTorch; we copy its parameters
        into our network.  It's straightforward for conv1 to conv5.  However,
        the original VGG-16 does not contain the conv6 and conv7 layers, so we
        convert fc6 and fc7 into convolutional layers and subsample by
        decimation (see `decimate`).
        """
        state_dict = self.state_dict()
        param_names = list(state_dict.keys())
        pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()
        pretrained_param_names = list(pretrained_state_dict.keys())
        # Copy conv1..conv5 parameters by position; the last 4 entries of our
        # state dict are conv6/conv7, which have no direct pretrained match.
        for i, param in enumerate(param_names[:-4]):
            state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]
        # fc6 -> conv6: reshape (4096, 25088) to (4096, 512, 7, 7), then
        # decimate to (1024, 512, 3, 3).
        conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7)
        conv_fc6_bias = pretrained_state_dict['classifier.0.bias']
        state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3])
        state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4])
        # fc7 -> conv7: reshape (4096, 4096) to (4096, 4096, 1, 1), then
        # decimate to (1024, 1024, 1, 1).
        conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1)
        conv_fc7_bias = pretrained_state_dict['classifier.3.bias']
        state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None])
        state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4])
        self.load_state_dict(state_dict)


def get_inputs():
    """Return sample positional inputs for VGGBase.forward()."""
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    """Return ([args], {kwargs}) used to construct VGGBase()."""
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torchvision
from torch import nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# --- Generated Triton kernels -------------------------------------------
# Kernels 0-9 repack weights / the input so the channel index becomes the
# fastest-varying (stride-1) dimension, as shown by each store index of the
# form (channel + C * spatial + ...). Kernels 10-22 are fused epilogues
# (bias-add + ReLU) and max-pooling steps between the extern convolutions
# launched from call() below.

@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack conv1_1 weight (64,3,3,3): y over 192 = 64*3 filters*channels,
    # x over the 9 kernel taps; store puts the channel index innermost.
    ynumel = 192
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack the input image (4,3,64,64): channel made innermost (stride 1).
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 3x3 weight with 64 input channels (used for conv1_2).
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same repack as kernel 2, launched for conv2_1's (128,64,3,3) weight.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 3x3 weight with 128 input channels (conv2_2).
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same repack as kernel 4, for conv3_1's (256,128,3,3) weight.
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 3x3 weight with 256 input channels (conv3_2/conv3_3);
    # y-grid is split over program_id(1) and (2) because it is larger here.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same repack as kernel 6, for conv4_1's (512,256,3,3) weight.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack a 3x3 weight with 512 input channels (conv4_2..conv5_3).
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Same repack as kernel 8, for conv6's (1024,512,3,3) dilated weight.
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue: add per-channel bias (64 channels) then ReLU.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool, 64 channels, 64x64 -> 32x32; also emits an
    # int8 argmax (0..3 within the window) for the backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = xindex // 64 % 32
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU epilogue, 128 channels.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool, 128 channels, 32x32 -> 16x16, plus argmax.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = xindex // 128 % 16
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU epilogue, 256 channels.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool, 256 channels, 16x16 -> 8x8, plus argmax.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 256
    x1 = xindex // 256 % 8
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU epilogue, 512 channels.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_17(in_ptr0, in_ptr1, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Bias-add + ReLU for conv4_3 that also transposes the channel-innermost
    # conv output back to a contiguous layout (store index x2 + 64 * y3) —
    # this tensor (buf40) is returned as conv4_3_feats.
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_18(in_ptr0, out_ptr0, out_ptr1,
    ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pool 8x8 -> 4x4 (pool4); reads the contiguous buf40
    # and writes back in channel-innermost layout, plus argmax indices.
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex % 4
    x3 = xindex // 4
    y4 = yindex
    x5 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (2 * x2 + 16 * x3 + 64 * y4), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x2 + 16 * x3 + 64 * y4), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 2 * x2 + 16 * x3 + 64 * y4), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (9 + 2 * x2 + 16 * x3 + 64 * y4), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1, 1], 1, tl.int8)
    tmp9 = tl.full([1, 1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1, 1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1, 1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (y0 + 512 * x5 + 8192 * y1), tmp6, xmask)
    tl.store(out_ptr1 + (y0 + 512 * x5 + 8192 * y1), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU epilogue, 512 channels (conv5_x stage).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # pool5: 3x3 stride-1 pad-1 max pool over a 4x4 map. Out-of-bounds taps
    # are masked and replaced with -inf; the int8 output records the argmax
    # position (0..8) within each 3x3 window for the backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 2048 % 4
    x1 = xindex // 512 % 4
    x6 = xindex
    tmp0 = -1 + x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-2560 + x6), tmp10, other=float('-inf'))
    tmp12 = x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-2048 + x6), tmp16, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + (-1536 + x6), tmp23, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = x2
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    tmp31 = tl.load(in_ptr0 + (-512 + x6), tmp30, other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + x6, tmp33, other=float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    tmp37 = tl.load(in_ptr0 + (512 + x6), tmp36, other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    tmp39 = 1 + x2
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    tmp44 = tl.load(in_ptr0 + (1536 + x6), tmp43, other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    tmp47 = tl.load(in_ptr0 + (2048 + x6), tmp46, other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    tmp50 = tl.load(in_ptr0 + (2560 + x6), tmp49, other=float('-inf'))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    tmp52 = tmp17 > tmp11
    tmp53 = tl.full([1], 1, tl.int8)
    tmp54 = tl.full([1], 0, tl.int8)
    tmp55 = tl.where(tmp52, tmp53, tmp54)
    tmp56 = tmp24 > tmp18
    tmp57 = tl.full([1], 2, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp31 > tmp25
    tmp60 = tl.full([1], 3, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp34 > tmp32
    tmp63 = tl.full([1], 4, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp37 > tmp35
    tmp66 = tl.full([1], 5, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp44 > tmp38
    tmp69 = tl.full([1], 6, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp47 > tmp45
    tmp72 = tl.full([1], 7, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp50 > tmp48
    tmp75 = tl.full([1], 8, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tl.store(out_ptr0 + x6, tmp51, None)
    tl.store(out_ptr1 + x6, tmp76, None)


@triton.jit
def triton_poi_fused_convolution_relu_21(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU epilogue, 1024 channels (conv6).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0,
    in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    # conv7 epilogue: bias-add + ReLU written to a contiguous output
    # (conv7_feats), plus a bool mask (activation <= 0) kept for backward.
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 1024
    y1 = yindex // 1024
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 1024 * x2 + 16384 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask)
    tl.store(out_ptr1 + (y0 + 1024 * x2 + 16384 * y1), tmp6, xmask)


def call(args):
    """Run the compiled VGG base forward pass.

    ``args`` is the flat list of 31 parameter tensors (conv weights/biases)
    plus the input image (primals_3); the list is cleared after unpacking.
    Returns a tuple whose first two entries are conv4_3_feats (buf40) and
    conv7_feats (buf54); the remaining entries are intermediates saved for
    the backward pass. Requires CUDA device 0.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22,
        primals_23, primals_24, primals_25, primals_26, primals_27,
        primals_28, primals_29, primals_30, primals_31) = args
    args.clear()
    # Shape/stride contracts for every parameter and the input.
    assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (128,), (1,))
    assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_11, (256,), (1,))
    assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_13, (256,), (1,))
    assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_15, (256,), (1,))
    assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (512,), (1,))
    assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_19, (512,), (1,))
    assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_21, (512,), (1,))
    assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_23, (512,), (1,))
    assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_25, (512,), (1,))
    assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_27, (512,), (1,))
    assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_29, (1024,), (1,))
    assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_31, (1024,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # --- Repack all weights and the input to channel-innermost layouts
        # (note the stride-1 channel dimension in each buffer below).
        buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
            .float32)
        triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
            float32)
        triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
            .float32)
        triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
            16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_10
        buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_12
        buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_14
        buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_16
        buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_18
        buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_20
        buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_22
        buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_24
        buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_26
        buf14 = empty_strided_cuda((1024, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_9[grid(524288, 9)](primals_28, buf14, 524288, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_28
        # --- conv1 block: conv -> bias+ReLU (in place) x2, then pool1.
        buf15 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf16 = buf15
        del buf15
        triton_poi_fused_convolution_relu_10[grid(1048576)](buf16,
            primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf18 = buf17
        del buf17
        triton_poi_fused_convolution_relu_10[grid(1048576)](buf18,
            primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
            torch.float32)
        buf20 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_11[grid(262144)](buf18,
            buf19, buf20, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        # --- conv2 block + pool2.
        buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf21, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf22 = buf21
        del buf21
        triton_poi_fused_convolution_relu_12[grid(524288)](buf22,
            primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf24 = buf23
        del buf23
        triton_poi_fused_convolution_relu_12[grid(524288)](buf24,
            primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
            torch.float32)
        buf26 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_13[grid(131072)](buf24,
            buf25, buf26, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
        # --- conv3 block (three convs) + pool3.
        buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf27, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf28 = buf27
        del buf27
        triton_poi_fused_convolution_relu_14[grid(262144)](buf28,
            primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_11
        buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf29, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf30 = buf29
        del buf29
        triton_poi_fused_convolution_relu_14[grid(262144)](buf30,
            primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_13
        buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf31, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf32 = buf31
        del buf31
        triton_poi_fused_convolution_relu_14[grid(262144)](buf32,
            primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_15
        buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
            torch.float32)
        buf34 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_15[grid(65536)](buf32,
            buf33, buf34, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        # --- conv4 block; buf40 (conv4_3 after ReLU, contiguous) is the
        # first returned feature map.
        buf35 = extern_kernels.convolution(buf33, buf8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf36 = buf35
        del buf35
        triton_poi_fused_convolution_relu_16[grid(131072)](buf36,
            primals_17, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_17
        buf37 = extern_kernels.convolution(buf36, buf9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf38 = buf37
        del buf37
        triton_poi_fused_convolution_relu_16[grid(131072)](buf38,
            primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_19
        buf39 = extern_kernels.convolution(buf38, buf10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf39, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
            .float32)
        triton_poi_fused_convolution_relu_17[grid(2048, 64)](buf39,
            primals_21, buf40, 2048, 64, XBLOCK=64, YBLOCK=16, num_warps=4,
            num_stages=1)
        del buf39
        del primals_21
        buf41 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.float32)
        buf42 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_18[grid(2048, 16)](buf40,
            buf41, buf42, 2048, 16, XBLOCK=16, YBLOCK=16, num_warps=4,
            num_stages=1)
        # --- conv5 block + pool5 (3x3, stride 1, pad 1).
        buf43 = extern_kernels.convolution(buf41, buf11, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf43, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf44 = buf43
        del buf43
        triton_poi_fused_convolution_relu_19[grid(32768)](buf44,
            primals_23, 32768, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_23
        buf45 = extern_kernels.convolution(buf44, buf12, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf45, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf46 = buf45
        del buf45
        triton_poi_fused_convolution_relu_19[grid(32768)](buf46,
            primals_25, 32768, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_25
        buf47 = extern_kernels.convolution(buf46, buf13, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf48 = buf47
        del buf47
        triton_poi_fused_convolution_relu_19[grid(32768)](buf48,
            primals_27, 32768, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_27
        buf49 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.float32)
        buf50 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_20[grid(32768)](buf48,
            buf49, buf50, 32768, XBLOCK=128, num_warps=4, num_stages=1)
        # --- conv6 (dilated) and conv7 (1x1); buf54 is conv7_feats.
        buf51 = extern_kernels.convolution(buf49, buf14, stride=(1, 1),
            padding=(6, 6), dilation=(6, 6), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf52 = buf51
        del buf51
        triton_poi_fused_convolution_relu_21[grid(65536)](buf52,
            primals_29, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_29
        buf53 = extern_kernels.convolution(buf52, primals_30, stride=(1, 1
            ), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf54 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
            torch.float32)
        buf55 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_22[grid(4096, 16)
            ](buf53, primals_31, buf54, buf55, 4096, 16, XBLOCK=16, YBLOCK=
            64, num_warps=4, num_stages=1)
        del buf53
        del primals_31
    # (conv4_3_feats, conv7_feats, then saved tensors for backward; buf40
    # appears twice by construction of the generated code.)
    return (buf40, buf54, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7,
        buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, buf16,
        buf18, buf19, buf20, buf22, buf24, buf25, buf26, buf28, buf30,
        buf32, buf33, buf34, buf36, buf38, buf40, buf41, buf42, buf44,
        buf46, buf48, buf49, buf50, buf52, buf55)


def decimate(tensor, m):
    """
    Decimate a tensor by a factor 'm', i.e. downsample by keeping every
    'm'th value.
    This is used when we convert FC layers to equivalent Convolutional
    layers, BUT of a smaller size.

    :param tensor: tensor to be decimated
    :param m: list of decimation factors for each dimension of the tensor;
        None if not to be decimated along a dimension
    :return: decimated tensor
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            # Keep indices 0, m[d], 2*m[d], ... along dimension d.
            tensor = tensor.index_select(dim=d, index=torch.arange(start=0,
                end=tensor.size(d), step=m[d]).long())
    return tensor


class VGGBaseNew(nn.Module):
    """
    VGG base convolutions to produce lower-level feature maps.
""" def __init__(self): super(VGGBaseNew, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1) self.load_pretrained_layers() def load_pretrained_layers(self): """ As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network. There's one available in PyTorch We copy these parameters into our network. It's straightforward for conv1 to conv5. However, the original VGG-16 does not contain the conv6 and con7 layers. Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py. 
""" state_dict = self.state_dict() param_names = list(state_dict.keys()) pretrained_state_dict = torchvision.models.vgg16(pretrained=True ).state_dict() pretrained_param_names = list(pretrained_state_dict.keys()) for i, param in enumerate(param_names[:-4]): state_dict[param] = pretrained_state_dict[pretrained_param_names[i] ] conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view( 4096, 512, 7, 7) conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view( 4096, 4096, 1, 1) conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) self.load_state_dict(state_dict) None def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv3_3.weight primals_15 = self.conv3_3.bias primals_16 = self.conv4_1.weight primals_17 = self.conv4_1.bias primals_18 = self.conv4_2.weight primals_19 = self.conv4_2.bias primals_20 = self.conv4_3.weight primals_21 = self.conv4_3.bias primals_22 = self.conv5_1.weight primals_23 = self.conv5_1.bias primals_24 = self.conv5_2.weight primals_25 = self.conv5_2.bias primals_26 = self.conv5_3.weight primals_27 = self.conv5_3.bias primals_28 = self.conv6.weight primals_29 = self.conv6.bias primals_30 = self.conv7.weight primals_31 = self.conv7.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31]) return output[0], output[1]
doduythao/ssd
VGGBase
false
13,233
[ "MIT" ]
0
170064a3edef05d3274b08ea7f622eb3238b5c5c
https://github.com/doduythao/ssd/tree/170064a3edef05d3274b08ea7f622eb3238b5c5c
SSD512
import torch import torchvision from math import sqrt import torch.nn.functional as F from torch import nn import torch.optim import torch.utils.data def decimate(tensor, m): """ Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value. This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size. :param tensor: tensor to be decimated :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension :return: decimated tensor """ assert tensor.dim() == len(m) for d in range(tensor.dim()): if m[d] is not None: tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) return tensor def cxcy_to_xy(cxcy): return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:, 2:] / 2], 1) def find_intersection(set_1, set_2): """ Find the intersection of every box combination between two sets of boxes that are in boundary coordinates. :param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2]. unsqueeze(0)) upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:]. unsqueeze(0)) intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] def find_jaccard_overlap(set_1, set_2): """ Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates. 
:param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ intersection = find_intersection(set_1, set_2) areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection return intersection / union def gcxgcy_to_cxcy(gcxgcy, priors_cxcy): """ Decode bounding box coordinates predicted by the model, since they are encoded in the form mentioned above. They are decoded into center-size coordinates. This is the inverse of the function above. :param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4) :param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4) :return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4) """ return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy [:, :2], torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1) class VGGBase(nn.Module): """ VGG base convolutions to produce lower-level feature maps. 
""" def __init__(self): super(VGGBase, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1) self.load_pretrained_layers() def forward(self, image): """ Forward propagation. 
:param image: images, a tensor of dimensions (N, 3, 512, 512) :return: lower-level feature maps conv4_3 and conv7 """ out = F.relu(self.conv1_1(image)) out = F.relu(self.conv1_2(out)) out = self.pool1(out) out = F.relu(self.conv2_1(out)) out = F.relu(self.conv2_2(out)) out = self.pool2(out) out = F.relu(self.conv3_1(out)) out = F.relu(self.conv3_2(out)) out = F.relu(self.conv3_3(out)) out = self.pool3(out) out = F.relu(self.conv4_1(out)) out = F.relu(self.conv4_2(out)) out = F.relu(self.conv4_3(out)) conv4_3_feats = out out = self.pool4(out) out = F.relu(self.conv5_1(out)) out = F.relu(self.conv5_2(out)) out = F.relu(self.conv5_3(out)) out = self.pool5(out) out = F.relu(self.conv6(out)) conv7_feats = F.relu(self.conv7(out)) return conv4_3_feats, conv7_feats def load_pretrained_layers(self): """ As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network. There's one available in PyTorch We copy these parameters into our network. It's straightforward for conv1 to conv5. However, the original VGG-16 does not contain the conv6 and con7 layers. Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py. 
""" state_dict = self.state_dict() param_names = list(state_dict.keys()) pretrained_state_dict = torchvision.models.vgg16(pretrained=True ).state_dict() pretrained_param_names = list(pretrained_state_dict.keys()) for i, param in enumerate(param_names[:-4]): state_dict[param] = pretrained_state_dict[pretrained_param_names[i] ] conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view( 4096, 512, 7, 7) conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view( 4096, 4096, 1, 1) conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) self.load_state_dict(state_dict) None class AuxiliaryConvolutions(nn.Module): """ Additional convolutions to produce higher-level feature maps. """ def __init__(self): super(AuxiliaryConvolutions, self).__init__() self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0) self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.conv12_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv12_2 = nn.Conv2d(128, 256, kernel_size=2, padding=0) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. 
""" for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv7_feats): """ Forward propagation. :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 32, 32) :return: higher-level feature maps conv8_2, conv9_2, conv10_2, conv11_2, conv12_2 """ out = F.relu(self.conv8_1(conv7_feats)) out = F.relu(self.conv8_2(out)) conv8_2_feats = out out = F.relu(self.conv9_1(out)) out = F.relu(self.conv9_2(out)) conv9_2_feats = out out = F.relu(self.conv10_1(out)) out = F.relu(self.conv10_2(out)) conv10_2_feats = out out = F.relu(self.conv11_1(out)) out = F.relu(self.conv11_2(out)) conv11_2_feats = out out = F.relu(self.conv12_1(out)) conv12_2_feats = F.relu(self.conv12_2(out)) return (conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats) class PredictionConvolutions(nn.Module): """ Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps. The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 24564 prior (default) boxes. See 'cxcy_to_gcxgcy' in utils.py for the encoding definition. The class scores represent the scores of each object class in each of the 24564 bounding boxes located. A high score for 'background' = no object. 
""" def __init__(self, n_classes): """ :param n_classes: number of different types of objects """ super(PredictionConvolutions, self).__init__() self.n_classes = n_classes n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6, 'conv10_2': 6, 'conv11_2': 4, 'conv12_2': 4} self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1) self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size= 3, padding=1) self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1) self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1) self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1) self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1) self.loc_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * 4, kernel_size=3, padding=1) self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1) self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1) self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * n_classes, kernel_size=3, padding=1) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. 
""" for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats): batch_size = conv4_3_feats.size(0) l_conv4_3 = self.loc_conv4_3(conv4_3_feats) l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous() l_conv4_3 = l_conv4_3.view(batch_size, -1, 4) l_conv7 = self.loc_conv7(conv7_feats) l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous() l_conv7 = l_conv7.view(batch_size, -1, 4) l_conv8_2 = self.loc_conv8_2(conv8_2_feats) l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous() l_conv8_2 = l_conv8_2.view(batch_size, -1, 4) l_conv9_2 = self.loc_conv9_2(conv9_2_feats) l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous() l_conv9_2 = l_conv9_2.view(batch_size, -1, 4) l_conv10_2 = self.loc_conv10_2(conv10_2_feats) l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous() l_conv10_2 = l_conv10_2.view(batch_size, -1, 4) l_conv11_2 = self.loc_conv11_2(conv11_2_feats) l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous() l_conv11_2 = l_conv11_2.view(batch_size, -1, 4) l_conv12_2 = self.loc_conv12_2(conv12_2_feats) l_conv12_2 = l_conv12_2.permute(0, 2, 3, 1).contiguous() l_conv12_2 = l_conv12_2.view(batch_size, -1, 4) c_conv4_3 = self.cl_conv4_3(conv4_3_feats) c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous() c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes) c_conv7 = self.cl_conv7(conv7_feats) c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous() c_conv7 = c_conv7.view(batch_size, -1, self.n_classes) c_conv8_2 = self.cl_conv8_2(conv8_2_feats) c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous() c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes) c_conv9_2 = self.cl_conv9_2(conv9_2_feats) c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous() c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes) c_conv10_2 = self.cl_conv10_2(conv10_2_feats) c_conv10_2 = 
c_conv10_2.permute(0, 2, 3, 1).contiguous() c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes) c_conv11_2 = self.cl_conv11_2(conv11_2_feats) c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous() c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes) c_conv12_2 = self.cl_conv12_2(conv12_2_feats) c_conv12_2 = c_conv12_2.permute(0, 2, 3, 1).contiguous() c_conv12_2 = c_conv12_2.view(batch_size, -1, self.n_classes) locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2, l_conv12_2], dim=1) classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2, c_conv12_2], dim=1) return locs, classes_scores class SSD512(nn.Module): """ The SSD512 network - encapsulates the base VGG network, auxiliary, and prediction convolutions. """ def __init__(self, n_classes): super(SSD512, self).__init__() self.n_classes = n_classes self.base = VGGBase() self.aux_convs = AuxiliaryConvolutions() self.pred_convs = PredictionConvolutions(n_classes) self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) nn.init.constant_(self.rescale_factors, 20) self.priors_cxcy = self.create_prior_boxes() def forward(self, image): """ Forward propagation. :param image: images, a tensor of dimensions (N, 3, 512, 512) :return: 24564 locations and class scores (i.e. w.r.t each prior box) for each image """ conv4_3_feats, conv7_feats = self.base(image) norm = conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt() conv4_3_feats = conv4_3_feats / norm conv4_3_feats = conv4_3_feats * self.rescale_factors (conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats) = self.aux_convs(conv7_feats) locs, classes_scores = self.pred_convs(conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats) return locs, classes_scores def create_prior_boxes(self): """ Create the 24564 prior (default) boxes for the SSD512, as defined in the paper. 
:return: prior boxes in center-size coordinates, a tensor of dimensions (24564, 4) """ fmap_dims = {'conv4_3': 64, 'conv7': 32, 'conv8_2': 16, 'conv9_2': 8, 'conv10_2': 4, 'conv11_2': 2, 'conv12_2': 1} obj_scales = {'conv4_3': 0.07, 'conv7': 0.15, 'conv8_2': 0.3, 'conv9_2': 0.45, 'conv10_2': 0.6, 'conv11_2': 0.75, 'conv12_2': 0.9 } aspect_ratios = {'conv4_3': [1.0, 2.0, 0.5], 'conv7': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv8_2': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv9_2': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv10_2': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv11_2': [1.0, 2.0, 0.5], 'conv12_2': [1.0, 2.0, 0.5]} fmaps = sorted(list(fmap_dims.keys())) prior_boxes = [] for k, fmap in enumerate(fmaps): for i in range(fmap_dims[fmap]): for j in range(fmap_dims[fmap]): cx = (j + 0.5) / fmap_dims[fmap] cy = (i + 0.5) / fmap_dims[fmap] for ratio in aspect_ratios[fmap]: prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt (ratio), obj_scales[fmap] / sqrt(ratio)]) if ratio == 1.0: try: additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]]) except IndexError: additional_scale = 1.0 prior_boxes.append([cx, cy, additional_scale, additional_scale]) prior_boxes = torch.FloatTensor(prior_boxes) prior_boxes.clamp_(0, 1) return prior_boxes def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k): """ Decipher the 24564 locations and class scores (output of ths SSD512) to detect objects. For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold. 
:param predicted_locs: predicted locations/boxes w.r.t the 24564 prior boxes, a tensor of dimensions :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions :param min_score: minimum threshold for a box to be considered a match for a certain class :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS :param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k' :return: detections (boxes, labels, and scores), lists of length batch_size """ batch_size = predicted_locs.size(0) n_priors = self.priors_cxcy.size(0) predicted_scores = F.softmax(predicted_scores, dim=2) all_images_boxes = list() all_images_labels = list() all_images_scores = list() assert n_priors == predicted_locs.size(1) == predicted_scores.size(1) for i in range(batch_size): decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy)) image_boxes = list() image_labels = list() image_scores = list() for c in range(1, self.n_classes): class_scores = predicted_scores[i][:, c] score_above_min_score = class_scores > min_score n_above_min_score = score_above_min_score.sum().item() if n_above_min_score == 0: continue class_scores = class_scores[score_above_min_score] class_decoded_locs = decoded_locs[score_above_min_score] class_scores, sort_ind = class_scores.sort(dim=0, descending=True) class_decoded_locs = class_decoded_locs[sort_ind] overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) suppress = torch.zeros(n_above_min_score, dtype=torch.bool) for box in range(class_decoded_locs.size(0)): if suppress[box] == 1: continue suppress = suppress | (overlap[box] > max_overlap) suppress[box] = 0 image_boxes.append(class_decoded_locs[~suppress]) image_labels.append(torch.LongTensor((~suppress).sum().item () * [c])) image_scores.append(class_scores[~suppress]) if len(image_boxes) == 0: image_boxes.append(torch.FloatTensor([[0.0, 0.0, 
1.0, 1.0]])) image_labels.append(torch.LongTensor([0])) image_scores.append(torch.FloatTensor([0.0])) image_boxes = torch.cat(image_boxes, dim=0) image_labels = torch.cat(image_labels, dim=0) image_scores = torch.cat(image_scores, dim=0) n_objects = image_scores.size(0) if n_objects > top_k: image_scores, sort_ind = image_scores.sort(dim=0, descending=True) image_scores = image_scores[:top_k] image_boxes = image_boxes[sort_ind][:top_k] image_labels = image_labels[sort_ind][:top_k] all_images_boxes.append(image_boxes) all_images_labels.append(image_labels) all_images_scores.append(image_scores) return all_images_boxes, all_images_labels, all_images_scores def get_inputs(): return [torch.rand([4, 3, 512, 512])] def get_init_inputs(): return [[], {'n_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torchvision from math import sqrt import torch.nn.functional as F from torch import nn import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 262144 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 1024 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 1024 * x1), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (512 + 2 * x0 + 1024 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (513 + 2 * x0 + 1024 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = 
tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 65536 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 512 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (256 + 2 * x0 + 512 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (257 + 2 * x0 + 512 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, 
in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16384 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 256 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 256 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (128 + 2 * x0 + 256 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (129 + 2 * x0 + 256 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, 
tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = 
tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-33 + x4), tmp10, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-32 + x4), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-31 + x4), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x4, tmp33, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (31 + x4), tmp43, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (32 + x4), tmp46, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (33 + x4), tmp49, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, 
tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + x4, tmp51, None) tl.store(out_ptr1 + x4, tmp76, None) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_red_fused_pow_sqrt_sum_11(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = xindex // 4096 _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 2097152 * x1), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp3) tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp5, None) @triton.jit def triton_poi_fused_div_mul_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 2097152 x1 = xindex // 4096 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp2, None) tl.store(out_ptr1 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 393024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 24564 x0 = xindex % 4 x2 = xindex // 98256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 16384, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4096 * ((x0 + 4 * x1) % 16) + 65536 * ((x0 + 4 * x1 + 65536 * x2) // 65536 % 4) + (x0 + 4 * x1) // 16 % 4096), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + 4 * x1) % 16, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 22528, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + (1024 * ((x0 + 4 * (-16384 + x1)) % 24) + 24576 * ((x0 + 4 * (-16384 + x1) + 24576 * x2) // 24576 % 4) + (x0 + 4 * (-16384 + x1)) // 24 % 1024), tmp13 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.load(in_ptr3 + (x0 + 4 * (-16384 + x1)) % 24, tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 24064, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr4 + (256 * ((x0 + 4 * (-22528 + x1)) % 24) + 6144 * ((x0 + 4 * (-22528 + x1) + 6144 * x2) // 6144 % 4) + (x0 + 4 * (- 22528 + x1)) // 24 % 256), tmp22 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tl.load(in_ptr5 + (x0 + 4 * (-22528 + x1)) % 24, tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tmp29 = tl.full([1], 24448, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr6 + (64 * ((x0 + 4 * (-24064 + x1)) % 24) + 1536 * ((x0 + 4 * (-24064 + x1) + 1536 * x2) // 1536 % 4) + (x0 + 4 * (- 24064 + x1)) // 24 % 64), tmp31 & xmask, eviction_policy= 'evict_last', other=0.0) tmp33 = tl.load(in_ptr7 + (x0 + 4 * (-24064 + x1)) % 24, tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp32 + tmp33 tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp31, tmp34, tmp35) tmp37 = tmp0 >= tmp29 tmp38 = tl.full([1], 24544, tl.int64) tmp39 = tmp0 < tmp38 tmp40 = tmp37 & tmp39 tmp41 = 
tl.load(in_ptr8 + (16 * ((x0 + 4 * (-24448 + x1)) % 24) + 384 * ((x0 + 4 * (-24448 + x1) + 384 * x2) // 384 % 4) + (x0 + 4 * (- 24448 + x1)) // 24 % 16), tmp40 & xmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr9 + (x0 + 4 * (-24448 + x1)) % 24, tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 + tmp42 tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp40, tmp43, tmp44) tmp46 = tmp0 >= tmp38 tmp47 = tl.full([1], 24560, tl.int64) tmp48 = tmp0 < tmp47 tmp49 = tmp46 & tmp48 tmp50 = tl.load(in_ptr10 + (4 * ((x0 + 4 * (-24544 + x1)) % 16) + 64 * ((x0 + 4 * (-24544 + x1) + 64 * x2) // 64 % 4) + (x0 + 4 * (-24544 + x1)) // 16 % 4), tmp49 & xmask, eviction_policy='evict_last', other=0.0 ) tmp51 = tl.load(in_ptr11 + (x0 + 4 * (-24544 + x1)) % 16, tmp49 & xmask, eviction_policy='evict_last', other=0.0) tmp52 = tmp50 + tmp51 tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp49, tmp52, tmp53) tmp55 = tmp0 >= tmp47 tl.full([1], 24564, tl.int64) tmp58 = tl.load(in_ptr12 + (x0 + 4 * (-24560 + x1) + 16 * x2), tmp55 & xmask, other=0.0) tmp59 = tl.load(in_ptr13 + (x0 + 4 * (-24560 + x1)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tmp58 + tmp59 tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype) tmp62 = tl.where(tmp55, tmp60, tmp61) tmp63 = tl.where(tmp49, tmp54, tmp62) tmp64 = tl.where(tmp40, tmp45, tmp63) tmp65 = tl.where(tmp31, tmp36, tmp64) tmp66 = tl.where(tmp22, tmp27, tmp65) tmp67 = tl.where(tmp13, tmp18, tmp66) tmp68 = tl.where(tmp4, tmp9, tmp67) tl.store(out_ptr0 + x3, tmp68, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, 
primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 512, 512), (786432, 262144, 512, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) 
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (1024,), (1,)) assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_31, (1024,), (1,)) assert_size_stride(primals_32, (1, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_34, (256,), (1,)) assert_size_stride(primals_35, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_36, (512,), (1,)) assert_size_stride(primals_37, (128, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_38, (128,), (1,)) assert_size_stride(primals_39, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_40, (256,), (1,)) assert_size_stride(primals_41, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_42, (128,), (1,)) assert_size_stride(primals_43, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_44, (256,), (1,)) assert_size_stride(primals_45, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_46, (128,), (1,)) assert_size_stride(primals_47, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_48, (256,), (1,)) assert_size_stride(primals_49, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_50, (128,), (1,)) assert_size_stride(primals_51, (256, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_52, (256,), (1,)) assert_size_stride(primals_53, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_54, (16,), (1,)) assert_size_stride(primals_55, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_56, (24,), (1,)) assert_size_stride(primals_57, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_58, (24,), (1,)) assert_size_stride(primals_59, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_60, (24,), (1,)) assert_size_stride(primals_61, (24, 256, 3, 3), (2304, 
9, 3, 1)) assert_size_stride(primals_62, (24,), (1,)) assert_size_stride(primals_63, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_64, (16,), (1,)) assert_size_stride(primals_65, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_66, (16,), (1,)) assert_size_stride(primals_67, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_68, (16,), (1,)) assert_size_stride(primals_69, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_70, (24,), (1,)) assert_size_stride(primals_71, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_72, (24,), (1,)) assert_size_stride(primals_73, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_74, (24,), (1,)) assert_size_stride(primals_75, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_76, (24,), (1,)) assert_size_stride(primals_77, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_78, (16,), (1,)) assert_size_stride(primals_79, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_80, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 512, 512), (16777216, 262144, 512, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(67108864)](buf1, primals_2, 67108864, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 512, 512), (16777216, 262144, 512, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(67108864)](buf3, primals_5, 67108864, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.float32) buf5 = 
empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(16777216)](buf3, buf4, buf5, 16777216, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 256, 256), (8388608, 65536, 256, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_2[grid(33554432)](buf7, primals_7, 33554432, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 256, 256), (8388608, 65536, 256, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_2[grid(33554432)](buf9, primals_9, 33554432, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(8388608)](buf9, buf10, buf11, 8388608, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(16777216)](buf13, primals_11, 16777216, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_4[grid(16777216)](buf15, primals_13, 
16777216, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_4[grid(16777216)](buf17, primals_15, 16777216, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf18 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) buf19 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(4194304)](buf17, buf18, buf19, 4194304, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_6[grid(8388608)](buf21, primals_17, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_6[grid(8388608)](buf23, primals_19, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_6[grid(8388608)](buf25, primals_21, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf26 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), 
torch.float32) buf27 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_7[grid(2097152)](buf25, buf26, buf27, 2097152, XBLOCK=512, num_warps=8, num_stages=1) buf28 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_8[grid(2097152)](buf29, primals_23, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_8[grid(2097152)](buf31, primals_25, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf33 = buf32 del buf32 triton_poi_fused_convolution_relu_8[grid(2097152)](buf33, primals_27, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf34 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32) buf35 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_9[grid(2097152)](buf33, buf34, buf35, 2097152, XBLOCK=512, num_warps=8, num_stages=1) buf36 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 1024, 32, 32), (1048576, 1024, 32, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_10[grid(4194304)](buf37, 
primals_29, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_29 buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1024, 32, 32), (1048576, 1024, 32, 1)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_10[grid(4194304)](buf39, primals_31, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_31 buf40 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf41 = reinterpret_tensor(buf40, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf40 triton_red_fused_pow_sqrt_sum_11[grid(16384)](buf41, buf25, 16384, 512, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) buf42 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) buf43 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) triton_poi_fused_div_mul_12[grid(8388608)](buf25, buf41, primals_32, buf42, buf43, 8388608, XBLOCK=512, num_warps=8, num_stages=1) buf44 = extern_kernels.convolution(buf39, primals_33, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf45 = buf44 del buf44 triton_poi_fused_convolution_relu_13[grid(1048576)](buf45, primals_34, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_34 buf46 = extern_kernels.convolution(buf45, primals_35, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 512, 16, 16), (131072, 256, 16, 1)) buf47 = buf46 del buf46 triton_poi_fused_convolution_relu_14[grid(524288)](buf47, primals_36, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_36 buf48 = extern_kernels.convolution(buf47, primals_37, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf48, (4, 128, 16, 16), (32768, 256, 16, 1)) buf49 = buf48 del buf48 triton_poi_fused_convolution_relu_15[grid(131072)](buf49, primals_38, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_38 buf50 = extern_kernels.convolution(buf49, primals_39, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 256, 8, 8), (16384, 64, 8, 1)) buf51 = buf50 del buf50 triton_poi_fused_convolution_relu_16[grid(65536)](buf51, primals_40, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_40 buf52 = extern_kernels.convolution(buf51, primals_41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 128, 8, 8), (8192, 64, 8, 1)) buf53 = buf52 del buf52 triton_poi_fused_convolution_relu_17[grid(32768)](buf53, primals_42, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_42 buf54 = extern_kernels.convolution(buf53, primals_43, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 256, 4, 4), (4096, 16, 4, 1)) buf55 = buf54 del buf54 triton_poi_fused_convolution_relu_18[grid(16384)](buf55, primals_44, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_44 buf56 = extern_kernels.convolution(buf55, primals_45, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 128, 4, 4), (2048, 16, 4, 1)) buf57 = buf56 del buf56 triton_poi_fused_convolution_relu_19[grid(8192)](buf57, primals_46, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_46 buf58 = extern_kernels.convolution(buf57, primals_47, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 256, 2, 2), (1024, 4, 2, 1)) buf59 = buf58 del buf58 
triton_poi_fused_convolution_relu_20[grid(4096)](buf59, primals_48, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_48 buf60 = extern_kernels.convolution(buf59, primals_49, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 128, 2, 2), (512, 4, 2, 1)) buf61 = buf60 del buf60 triton_poi_fused_convolution_relu_21[grid(2048)](buf61, primals_50, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_50 buf62 = extern_kernels.convolution(buf61, primals_51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf62, (4, 256, 1, 1), (256, 1, 1, 1)) buf63 = buf62 del buf62 triton_poi_fused_convolution_relu_22[grid(1024)](buf63, primals_52, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_52 buf64 = extern_kernels.convolution(buf43, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf65 = extern_kernels.convolution(buf39, primals_55, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 24, 32, 32), (24576, 1024, 32, 1)) buf66 = extern_kernels.convolution(buf47, primals_57, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 24, 16, 16), (6144, 256, 16, 1)) buf67 = extern_kernels.convolution(buf51, primals_59, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 24, 8, 8), (1536, 64, 8, 1)) buf68 = extern_kernels.convolution(buf55, primals_61, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf68, (4, 24, 4, 4), (384, 16, 
4, 1)) buf69 = extern_kernels.convolution(buf59, primals_63, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf69, (4, 16, 2, 2), (64, 4, 2, 1)) buf70 = extern_kernels.convolution(buf63, primals_65, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 16, 1, 1), (16, 1, 1, 1)) buf71 = extern_kernels.convolution(buf43, primals_67, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf71, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf72 = extern_kernels.convolution(buf39, primals_69, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf72, (4, 24, 32, 32), (24576, 1024, 32, 1)) buf73 = extern_kernels.convolution(buf47, primals_71, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf73, (4, 24, 16, 16), (6144, 256, 16, 1)) buf74 = extern_kernels.convolution(buf51, primals_73, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf74, (4, 24, 8, 8), (1536, 64, 8, 1)) buf75 = extern_kernels.convolution(buf55, primals_75, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf75, (4, 24, 4, 4), (384, 16, 4, 1)) buf76 = extern_kernels.convolution(buf59, primals_77, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf76, (4, 16, 2, 2), (64, 4, 2, 1)) buf77 = extern_kernels.convolution(buf63, primals_79, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf77, (4, 16, 1, 
1), (16, 1, 1, 1)) buf78 = empty_strided_cuda((4, 24564, 4), (98256, 4, 1), torch.float32) triton_poi_fused_cat_23[grid(393024)](buf64, primals_54, buf65, primals_56, buf66, primals_58, buf67, primals_60, buf68, primals_62, buf69, primals_64, buf70, primals_66, buf78, 393024, XBLOCK=512, num_warps=8, num_stages=1) del buf64 del buf65 del buf66 del buf67 del buf68 del buf69 del buf70 del primals_54 del primals_56 del primals_58 del primals_60 del primals_62 del primals_64 del primals_66 buf79 = empty_strided_cuda((4, 24564, 4), (98256, 4, 1), torch.float32) triton_poi_fused_cat_23[grid(393024)](buf71, primals_68, buf72, primals_70, buf73, primals_72, buf74, primals_74, buf75, primals_76, buf76, primals_78, buf77, primals_80, buf79, 393024, XBLOCK=512, num_warps=8, num_stages=1) del buf71 del buf72 del buf73 del buf74 del buf75 del buf76 del buf77 del primals_68 del primals_70 del primals_72 del primals_74 del primals_76 del primals_78 del primals_80 return (buf78, buf79, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_33, primals_35, primals_37, primals_39, primals_41, primals_43, primals_45, primals_47, primals_49, primals_51, primals_53, primals_55, primals_57, primals_59, primals_61, primals_63, primals_65, primals_67, primals_69, primals_71, primals_73, primals_75, primals_77, primals_79, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf25, buf26, buf27, buf29, buf31, buf33, buf34, buf35, buf37, buf39, buf41, buf42, buf43, buf45, buf47, buf49, buf51, buf53, buf55, buf57, buf59, buf61, buf63) def decimate(tensor, m): """ Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value. This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size. 
:param tensor: tensor to be decimated :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension :return: decimated tensor """ assert tensor.dim() == len(m) for d in range(tensor.dim()): if m[d] is not None: tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) return tensor def cxcy_to_xy(cxcy): return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:, 2:] / 2], 1) def find_intersection(set_1, set_2): """ Find the intersection of every box combination between two sets of boxes that are in boundary coordinates. :param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2]. unsqueeze(0)) upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:]. unsqueeze(0)) intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] def find_jaccard_overlap(set_1, set_2): """ Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates. 
:param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ intersection = find_intersection(set_1, set_2) areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection return intersection / union def gcxgcy_to_cxcy(gcxgcy, priors_cxcy): """ Decode bounding box coordinates predicted by the model, since they are encoded in the form mentioned above. They are decoded into center-size coordinates. This is the inverse of the function above. :param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4) :param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4) :return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4) """ return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy [:, :2], torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1) class VGGBase(nn.Module): """ VGG base convolutions to produce lower-level feature maps. 
""" def __init__(self): super(VGGBase, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1) self.load_pretrained_layers() def forward(self, image): """ Forward propagation. 
:param image: images, a tensor of dimensions (N, 3, 512, 512) :return: lower-level feature maps conv4_3 and conv7 """ out = F.relu(self.conv1_1(image)) out = F.relu(self.conv1_2(out)) out = self.pool1(out) out = F.relu(self.conv2_1(out)) out = F.relu(self.conv2_2(out)) out = self.pool2(out) out = F.relu(self.conv3_1(out)) out = F.relu(self.conv3_2(out)) out = F.relu(self.conv3_3(out)) out = self.pool3(out) out = F.relu(self.conv4_1(out)) out = F.relu(self.conv4_2(out)) out = F.relu(self.conv4_3(out)) conv4_3_feats = out out = self.pool4(out) out = F.relu(self.conv5_1(out)) out = F.relu(self.conv5_2(out)) out = F.relu(self.conv5_3(out)) out = self.pool5(out) out = F.relu(self.conv6(out)) conv7_feats = F.relu(self.conv7(out)) return conv4_3_feats, conv7_feats def load_pretrained_layers(self): """ As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network. There's one available in PyTorch We copy these parameters into our network. It's straightforward for conv1 to conv5. However, the original VGG-16 does not contain the conv6 and con7 layers. Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py. 
""" state_dict = self.state_dict() param_names = list(state_dict.keys()) pretrained_state_dict = torchvision.models.vgg16(pretrained=True ).state_dict() pretrained_param_names = list(pretrained_state_dict.keys()) for i, param in enumerate(param_names[:-4]): state_dict[param] = pretrained_state_dict[pretrained_param_names[i] ] conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view( 4096, 512, 7, 7) conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view( 4096, 4096, 1, 1) conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) self.load_state_dict(state_dict) None class AuxiliaryConvolutions(nn.Module): """ Additional convolutions to produce higher-level feature maps. """ def __init__(self): super(AuxiliaryConvolutions, self).__init__() self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0) self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.conv12_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv12_2 = nn.Conv2d(128, 256, kernel_size=2, padding=0) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. 
""" for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv7_feats): """ Forward propagation. :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 32, 32) :return: higher-level feature maps conv8_2, conv9_2, conv10_2, conv11_2, conv12_2 """ out = F.relu(self.conv8_1(conv7_feats)) out = F.relu(self.conv8_2(out)) conv8_2_feats = out out = F.relu(self.conv9_1(out)) out = F.relu(self.conv9_2(out)) conv9_2_feats = out out = F.relu(self.conv10_1(out)) out = F.relu(self.conv10_2(out)) conv10_2_feats = out out = F.relu(self.conv11_1(out)) out = F.relu(self.conv11_2(out)) conv11_2_feats = out out = F.relu(self.conv12_1(out)) conv12_2_feats = F.relu(self.conv12_2(out)) return (conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats) class PredictionConvolutions(nn.Module): """ Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps. The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 24564 prior (default) boxes. See 'cxcy_to_gcxgcy' in utils.py for the encoding definition. The class scores represent the scores of each object class in each of the 24564 bounding boxes located. A high score for 'background' = no object. 
""" def __init__(self, n_classes): """ :param n_classes: number of different types of objects """ super(PredictionConvolutions, self).__init__() self.n_classes = n_classes n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6, 'conv10_2': 6, 'conv11_2': 4, 'conv12_2': 4} self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1) self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size= 3, padding=1) self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1) self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1) self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1) self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1) self.loc_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * 4, kernel_size=3, padding=1) self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1) self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1) self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv12_2 = nn.Conv2d(256, n_boxes['conv12_2'] * n_classes, kernel_size=3, padding=1) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. 
""" for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats, conv12_2_feats): batch_size = conv4_3_feats.size(0) l_conv4_3 = self.loc_conv4_3(conv4_3_feats) l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous() l_conv4_3 = l_conv4_3.view(batch_size, -1, 4) l_conv7 = self.loc_conv7(conv7_feats) l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous() l_conv7 = l_conv7.view(batch_size, -1, 4) l_conv8_2 = self.loc_conv8_2(conv8_2_feats) l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous() l_conv8_2 = l_conv8_2.view(batch_size, -1, 4) l_conv9_2 = self.loc_conv9_2(conv9_2_feats) l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous() l_conv9_2 = l_conv9_2.view(batch_size, -1, 4) l_conv10_2 = self.loc_conv10_2(conv10_2_feats) l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous() l_conv10_2 = l_conv10_2.view(batch_size, -1, 4) l_conv11_2 = self.loc_conv11_2(conv11_2_feats) l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous() l_conv11_2 = l_conv11_2.view(batch_size, -1, 4) l_conv12_2 = self.loc_conv12_2(conv12_2_feats) l_conv12_2 = l_conv12_2.permute(0, 2, 3, 1).contiguous() l_conv12_2 = l_conv12_2.view(batch_size, -1, 4) c_conv4_3 = self.cl_conv4_3(conv4_3_feats) c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous() c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes) c_conv7 = self.cl_conv7(conv7_feats) c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous() c_conv7 = c_conv7.view(batch_size, -1, self.n_classes) c_conv8_2 = self.cl_conv8_2(conv8_2_feats) c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous() c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes) c_conv9_2 = self.cl_conv9_2(conv9_2_feats) c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous() c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes) c_conv10_2 = self.cl_conv10_2(conv10_2_feats) c_conv10_2 = 
c_conv10_2.permute(0, 2, 3, 1).contiguous() c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes) c_conv11_2 = self.cl_conv11_2(conv11_2_feats) c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous() c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes) c_conv12_2 = self.cl_conv12_2(conv12_2_feats) c_conv12_2 = c_conv12_2.permute(0, 2, 3, 1).contiguous() c_conv12_2 = c_conv12_2.view(batch_size, -1, self.n_classes) locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2, l_conv12_2], dim=1) classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2, c_conv12_2], dim=1) return locs, classes_scores class SSD512New(nn.Module): """ The SSD512 network - encapsulates the base VGG network, auxiliary, and prediction convolutions. """ def __init__(self, n_classes): super(SSD512New, self).__init__() self.n_classes = n_classes self.base = VGGBase() self.aux_convs = AuxiliaryConvolutions() self.pred_convs = PredictionConvolutions(n_classes) self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) nn.init.constant_(self.rescale_factors, 20) self.priors_cxcy = self.create_prior_boxes() def create_prior_boxes(self): """ Create the 24564 prior (default) boxes for the SSD512, as defined in the paper. 
:return: prior boxes in center-size coordinates, a tensor of dimensions (24564, 4) """ fmap_dims = {'conv4_3': 64, 'conv7': 32, 'conv8_2': 16, 'conv9_2': 8, 'conv10_2': 4, 'conv11_2': 2, 'conv12_2': 1} obj_scales = {'conv4_3': 0.07, 'conv7': 0.15, 'conv8_2': 0.3, 'conv9_2': 0.45, 'conv10_2': 0.6, 'conv11_2': 0.75, 'conv12_2': 0.9 } aspect_ratios = {'conv4_3': [1.0, 2.0, 0.5], 'conv7': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv8_2': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv9_2': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv10_2': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv11_2': [1.0, 2.0, 0.5], 'conv12_2': [1.0, 2.0, 0.5]} fmaps = sorted(list(fmap_dims.keys())) prior_boxes = [] for k, fmap in enumerate(fmaps): for i in range(fmap_dims[fmap]): for j in range(fmap_dims[fmap]): cx = (j + 0.5) / fmap_dims[fmap] cy = (i + 0.5) / fmap_dims[fmap] for ratio in aspect_ratios[fmap]: prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt (ratio), obj_scales[fmap] / sqrt(ratio)]) if ratio == 1.0: try: additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]]) except IndexError: additional_scale = 1.0 prior_boxes.append([cx, cy, additional_scale, additional_scale]) prior_boxes = torch.FloatTensor(prior_boxes) prior_boxes.clamp_(0, 1) return prior_boxes def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k): """ Decipher the 24564 locations and class scores (output of ths SSD512) to detect objects. For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold. 
:param predicted_locs: predicted locations/boxes w.r.t the 24564 prior boxes, a tensor of dimensions :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions :param min_score: minimum threshold for a box to be considered a match for a certain class :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS :param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k' :return: detections (boxes, labels, and scores), lists of length batch_size """ batch_size = predicted_locs.size(0) n_priors = self.priors_cxcy.size(0) predicted_scores = F.softmax(predicted_scores, dim=2) all_images_boxes = list() all_images_labels = list() all_images_scores = list() assert n_priors == predicted_locs.size(1) == predicted_scores.size(1) for i in range(batch_size): decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy)) image_boxes = list() image_labels = list() image_scores = list() for c in range(1, self.n_classes): class_scores = predicted_scores[i][:, c] score_above_min_score = class_scores > min_score n_above_min_score = score_above_min_score.sum().item() if n_above_min_score == 0: continue class_scores = class_scores[score_above_min_score] class_decoded_locs = decoded_locs[score_above_min_score] class_scores, sort_ind = class_scores.sort(dim=0, descending=True) class_decoded_locs = class_decoded_locs[sort_ind] overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) suppress = torch.zeros(n_above_min_score, dtype=torch.bool) for box in range(class_decoded_locs.size(0)): if suppress[box] == 1: continue suppress = suppress | (overlap[box] > max_overlap) suppress[box] = 0 image_boxes.append(class_decoded_locs[~suppress]) image_labels.append(torch.LongTensor((~suppress).sum().item () * [c])) image_scores.append(class_scores[~suppress]) if len(image_boxes) == 0: image_boxes.append(torch.FloatTensor([[0.0, 0.0, 
1.0, 1.0]])) image_labels.append(torch.LongTensor([0])) image_scores.append(torch.FloatTensor([0.0])) image_boxes = torch.cat(image_boxes, dim=0) image_labels = torch.cat(image_labels, dim=0) image_scores = torch.cat(image_scores, dim=0) n_objects = image_scores.size(0) if n_objects > top_k: image_scores, sort_ind = image_scores.sort(dim=0, descending=True) image_scores = image_scores[:top_k] image_boxes = image_boxes[sort_ind][:top_k] image_labels = image_labels[sort_ind][:top_k] all_images_boxes.append(image_boxes) all_images_labels.append(image_labels) all_images_scores.append(image_scores) return all_images_boxes, all_images_labels, all_images_scores def forward(self, input_0): primals_32 = self.rescale_factors primals_1 = self.base.conv1_1.weight primals_2 = self.base.conv1_1.bias primals_4 = self.base.conv1_2.weight primals_5 = self.base.conv1_2.bias primals_6 = self.base.conv2_1.weight primals_7 = self.base.conv2_1.bias primals_8 = self.base.conv2_2.weight primals_9 = self.base.conv2_2.bias primals_10 = self.base.conv3_1.weight primals_11 = self.base.conv3_1.bias primals_12 = self.base.conv3_2.weight primals_13 = self.base.conv3_2.bias primals_14 = self.base.conv3_3.weight primals_15 = self.base.conv3_3.bias primals_16 = self.base.conv4_1.weight primals_17 = self.base.conv4_1.bias primals_18 = self.base.conv4_2.weight primals_19 = self.base.conv4_2.bias primals_20 = self.base.conv4_3.weight primals_21 = self.base.conv4_3.bias primals_22 = self.base.conv5_1.weight primals_23 = self.base.conv5_1.bias primals_24 = self.base.conv5_2.weight primals_25 = self.base.conv5_2.bias primals_26 = self.base.conv5_3.weight primals_27 = self.base.conv5_3.bias primals_28 = self.base.conv6.weight primals_29 = self.base.conv6.bias primals_30 = self.base.conv7.weight primals_31 = self.base.conv7.bias primals_33 = self.aux_convs.conv8_1.weight primals_34 = self.aux_convs.conv8_1.bias primals_35 = self.aux_convs.conv8_2.weight primals_36 = self.aux_convs.conv8_2.bias primals_37 = 
self.aux_convs.conv9_1.weight primals_38 = self.aux_convs.conv9_1.bias primals_39 = self.aux_convs.conv9_2.weight primals_40 = self.aux_convs.conv9_2.bias primals_41 = self.aux_convs.conv10_1.weight primals_42 = self.aux_convs.conv10_1.bias primals_43 = self.aux_convs.conv10_2.weight primals_44 = self.aux_convs.conv10_2.bias primals_45 = self.aux_convs.conv11_1.weight primals_46 = self.aux_convs.conv11_1.bias primals_47 = self.aux_convs.conv11_2.weight primals_48 = self.aux_convs.conv11_2.bias primals_49 = self.aux_convs.conv12_1.weight primals_50 = self.aux_convs.conv12_1.bias primals_51 = self.aux_convs.conv12_2.weight primals_52 = self.aux_convs.conv12_2.bias primals_53 = self.pred_convs.loc_conv4_3.weight primals_54 = self.pred_convs.loc_conv4_3.bias primals_55 = self.pred_convs.loc_conv7.weight primals_56 = self.pred_convs.loc_conv7.bias primals_57 = self.pred_convs.loc_conv8_2.weight primals_58 = self.pred_convs.loc_conv8_2.bias primals_59 = self.pred_convs.loc_conv9_2.weight primals_60 = self.pred_convs.loc_conv9_2.bias primals_61 = self.pred_convs.loc_conv10_2.weight primals_62 = self.pred_convs.loc_conv10_2.bias primals_63 = self.pred_convs.loc_conv11_2.weight primals_64 = self.pred_convs.loc_conv11_2.bias primals_65 = self.pred_convs.loc_conv12_2.weight primals_66 = self.pred_convs.loc_conv12_2.bias primals_67 = self.pred_convs.cl_conv4_3.weight primals_68 = self.pred_convs.cl_conv4_3.bias primals_69 = self.pred_convs.cl_conv7.weight primals_70 = self.pred_convs.cl_conv7.bias primals_71 = self.pred_convs.cl_conv8_2.weight primals_72 = self.pred_convs.cl_conv8_2.bias primals_73 = self.pred_convs.cl_conv9_2.weight primals_74 = self.pred_convs.cl_conv9_2.bias primals_75 = self.pred_convs.cl_conv10_2.weight primals_76 = self.pred_convs.cl_conv10_2.bias primals_77 = self.pred_convs.cl_conv11_2.weight primals_78 = self.pred_convs.cl_conv11_2.bias primals_79 = self.pred_convs.cl_conv12_2.weight primals_80 = self.pred_convs.cl_conv12_2.bias primals_3 = input_0 
output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80]) return output[0], output[1]
doduythao/ssd
SSD512
false
13,234
[ "MIT" ]
0
170064a3edef05d3274b08ea7f622eb3238b5c5c
https://github.com/doduythao/ssd/tree/170064a3edef05d3274b08ea7f622eb3238b5c5c
ResNetV2
import torch
import numpy as np
from collections import OrderedDict
from torch import nn
import torch.nn.functional as F


def conv1x1(cin, cout, stride=1, bias=False):
    """Return a weight-standardized 1x1 convolution (no spatial padding)."""
    return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
                     bias=bias)


def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    """Return a weight-standardized 3x3 convolution with padding 1."""
    return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
                     bias=bias, groups=groups)


def tf2th(conv_weights):
    """Convert a TensorFlow conv kernel (HWIO) to a PyTorch tensor (OIHW).

    Arrays that are not 4-D (biases, norm gammas/betas) are passed through
    unchanged, only wrapped as a torch tensor.
    """
    if conv_weights.ndim == 4:
        # HWIO -> OIHW: output channels first, input channels second.
        conv_weights = np.transpose(conv_weights, [3, 2, 0, 1])
    return torch.from_numpy(conv_weights)


class StdConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization.

    On every forward pass the kernel is normalized to zero mean and unit
    variance per output filter (biased variance, 1e-10 for numerical
    stability) before the convolution is applied.
    """

    def forward(self, x):
        weight = self.weight
        var, mean = torch.var_mean(weight, dim=[1, 2, 3], keepdim=True,
                                   unbiased=False)
        std_weight = (weight - mean) / torch.sqrt(var + 1e-10)
        return F.conv2d(x, std_weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)


class PreActBottleneck(nn.Module):
    """Pre-activation bottleneck block (GroupNorm + ReLU before each conv).

    Follows "Identity Mappings in Deep Residual Networks":
    https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
    except that the stride is placed on the 3x3 convolution when present.
    """

    def __init__(self, cin, cout=None, cmid=None, stride=1):
        super().__init__()
        cout = cout or cin
        cmid = cmid or cout // 4
        # NOTE: the creation order below is part of the contract — it fixes
        # both the state-dict key names and the parameter-init RNG stream.
        self.gn1 = nn.GroupNorm(32, cin)
        self.conv1 = conv1x1(cin, cmid)
        self.gn2 = nn.GroupNorm(32, cmid)
        self.conv2 = conv3x3(cmid, cmid, stride)
        self.gn3 = nn.GroupNorm(32, cmid)
        self.conv3 = conv1x1(cmid, cout)
        self.relu = nn.ReLU(inplace=True)
        if stride != 1 or cin != cout:
            # Projection shortcut, applied to the pre-activated input.
            self.downsample = conv1x1(cin, cout, stride)

    def forward(self, x):
        preact = self.relu(self.gn1(x))
        # Identity shortcut unless a channel/stride change needs projection;
        # the projection sees the *pre-activated* input, per the paper.
        if hasattr(self, 'downsample'):
            shortcut = self.downsample(preact)
        else:
            shortcut = x
        out = self.conv1(preact)
        out = self.conv2(self.relu(self.gn2(out)))
        out = self.conv3(self.relu(self.gn3(out)))
        return out + shortcut

    def load_from(self, weights, prefix=''):
        """Copy pretrained TF weights (a dict of numpy arrays) into this block.

        Returns self so calls can be chained.
        """
        with torch.no_grad():
            for tag, conv, gn in (('a', self.conv1, self.gn1),
                                  ('b', self.conv2, self.gn2),
                                  ('c', self.conv3, self.gn3)):
                conv.weight.copy_(
                    tf2th(weights[prefix + tag + '/standardized_conv2d/kernel']))
                gn.weight.copy_(tf2th(weights[prefix + tag + '/group_norm/gamma']))
                gn.bias.copy_(tf2th(weights[prefix + tag + '/group_norm/beta']))
            if hasattr(self, 'downsample'):
                self.downsample.weight.copy_(
                    tf2th(weights[prefix + 'a/proj/standardized_conv2d/kernel']))
        return self


class ResNetV2(nn.Module):
    """Pre-activation ResNet v2 with GroupNorm and weight-standardized convs
    (the "Big Transfer" backbone)."""

    BLOCK_UNITS = {'r50': [3, 4, 6, 3], 'r101': [3, 4, 23, 3], 'r152': [3,
        8, 36, 3]}

    def __init__(self, block_units, width_factor, head_size=21843,
        zero_head=False):
        super().__init__()
        wf = width_factor

        # Stem: 7x7/2 conv, explicit 1-pixel pad, then 3x3/2 max-pool
        # (overall spatial stride 4).
        self.root = nn.Sequential(OrderedDict([
            ('conv', StdConv2d(3, 64 * wf, kernel_size=7, stride=2,
                               padding=3, bias=False)),
            ('padp', nn.ConstantPad2d(1, 0)),
            ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
        ]))

        def stage(cin, cout, cmid, n_units, stride):
            # One stage: a (possibly strided) unit01 followed by
            # n_units - 1 stride-1 units, named unit01..unitNN.
            units = [('unit01', PreActBottleneck(cin=cin, cout=cout,
                                                 cmid=cmid, stride=stride))]
            units += [(f'unit{i:02d}', PreActBottleneck(cin=cout, cout=cout,
                                                        cmid=cmid))
                      for i in range(2, n_units + 1)]
            return nn.Sequential(OrderedDict(units))

        self.body = nn.Sequential(OrderedDict([
            ('block1', stage(64 * wf, 256 * wf, 64 * wf, block_units[0], 1)),
            ('block2', stage(256 * wf, 512 * wf, 128 * wf, block_units[1], 2)),
            ('block3', stage(512 * wf, 1024 * wf, 256 * wf, block_units[2], 2)),
            ('block4', stage(1024 * wf, 2048 * wf, 512 * wf, block_units[3], 2)),
        ]))

        self.zero_head = zero_head
        # Head: GN + ReLU, global average pool to 1x1, 1x1 conv classifier.
        self.head = nn.Sequential(OrderedDict([
            ('gn', nn.GroupNorm(32, 2048 * wf)),
            ('relu', nn.ReLU(inplace=True)),
            ('avg', nn.AdaptiveAvgPool2d(output_size=1)),
            ('conv', nn.Conv2d(2048 * wf, head_size, kernel_size=1,
                               bias=True)),
        ]))

    def forward(self, x):
        x = self.head(self.body(self.root(x)))
        assert x.shape[-2:] == (1, 1)
        # Drop the trailing 1x1 spatial dims -> (batch, head_size).
        return x[..., 0, 0]

    def load_from(self, weights, prefix='resnet/'):
        """Copy pretrained TF weights into the full network.

        When zero_head is set, the classifier is zero-initialized instead of
        loaded. Returns self so calls can be chained.
        """
        with torch.no_grad():
            self.root.conv.weight.copy_(tf2th(weights[
                f'{prefix}root_block/standardized_conv2d/kernel']))
            self.head.gn.weight.copy_(tf2th(weights[
                f'{prefix}group_norm/gamma']))
            self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
            if self.zero_head:
                nn.init.zeros_(self.head.conv.weight)
                nn.init.zeros_(self.head.conv.bias)
            else:
                self.head.conv.weight.copy_(tf2th(weights[
                    f'{prefix}head/conv2d/kernel']))
                self.head.conv.bias.copy_(tf2th(weights[
                    f'{prefix}head/conv2d/bias']))
            for bname, block in self.body.named_children():
                for uname, unit in block.named_children():
                    unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
        return self


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {'block_units': [4, 4, 4, 4], 'width_factor': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from collections import OrderedDict from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 768 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 2048 y1 = yindex // 2048 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 2048 * x2 + 18432 * y1), tmp0, xmask) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_6(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 rnumel = 147 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 147, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 147.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-10 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask) @triton.jit def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 8704 % 34 x1 = xindex // 256 % 34 x3 = xindex // 295936 x4 = xindex % 
8704 x6 = xindex tmp0 = -1 + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x1 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-8448 + x4 + 8192 * x2 + 262144 * x3), tmp10, other=0.0) tl.store(out_ptr0 + x6, tmp11, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 16 x2 = xindex // 4096 % 16 x3 = xindex // 65536 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp5 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp7 = tl.load(in_ptr0 + (8960 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp9 = tl.load(in_ptr0 + (9216 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp11 = tl.load(in_ptr0 + (17408 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp13 = tl.load(in_ptr0 + (17664 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp15 = tl.load(in_ptr0 + (17920 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) 
tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x4, tmp16, None) tl.store(out_ptr1 + x4, tmp41, None) @triton.jit def triton_red_fused_native_group_norm_9(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = rindex // 8 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, 
[RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_12(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_13(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 256 rnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = 
tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2304.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2304 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_out_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 262144 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 262144 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 8192.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, 
tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_poi_fused_add_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = 
triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_20(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_21(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, 
RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 16 r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 131072 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 4096.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, 
# NOTE(review): machine-generated TorchInductor/Triton kernels; the original text
# was whitespace-mangled, formatting below is restored with tokens unchanged.
# The next fragment closes a statement begun before this chunk.
None)


# Two-pass row standardization: 512 rows of 4608 values.  Pass 1: Welford
# mean/variance, store std = sqrt(var + 1e-10) into in_out_ptr0.  Pass 2:
# write (x - mean) / std to out_ptr1.  Presumably weight standardization of a
# conv weight tensor — TODO confirm against the caller.
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_23(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 512
    rnumel = 4608
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    # Pass 1: streaming Welford accumulation over the reduction dimension.
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask,
            eviction_policy='evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # Welford count is unused (dead expression from codegen).
    tmp5 = 4608.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, xmask)
    # Pass 2: re-read the row and emit the normalized values.
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask,
            eviction_policy='evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 4608 * x0), tmp12, rmask & xmask)


# Single-block group-norm statistics: per (sample, group) mean (out_ptr0),
# biased variance sum (out_ptr1) and rsqrt(var + 1e-5) (out_ptr2) over 1024
# elements (16 channels-in-group x 64 spatial — inferred from strides, confirm).
@triton.jit
def triton_per_fused_native_group_norm_24(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex % 16
    r3 = rindex // 16
    x0 = xindex % 32
    x1 = xindex // 32
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 1024.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-05
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.rsqrt(tmp17)
    tl.store(out_ptr2 + x4, tmp18, None)
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)


# Pointwise: apply group norm (mean in_ptr1, var-sum in_ptr2, affine
# weight/bias in_ptr3/in_ptr4) followed by ReLU.  Group size 1024 here.
@triton.jit
def triton_poi_fused_native_group_norm_relu_25(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 512
    x2 = xindex // 32768
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 1024.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


# Single-block row standardization over 512 values: std = sqrt(var + 1e-10)
# into in_out_ptr0, normalized row into out_ptr1 (one program per row).
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_26(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 512, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 512.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-10
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = tmp0 - tmp8
    tmp20 = tmp19 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp18, None)
    tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None)


# In-place elementwise residual add: in_out_ptr0 += in_ptr0.
@triton.jit
def triton_poi_fused_add_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = tl.load(in_out_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, None)


# Streaming group-norm statistics (reduction too large for one block):
# Welford over 4096 elements per (sample, group); emits mean, var sum and
# rsqrt(var/4096 + 1e-5).
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 128
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 32
    x1 = xindex // 32
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    x4 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex % 64
        r3 = rindex // 64
        tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1),
            rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]  # unused Welford count (codegen artifact).
    tl.store(out_ptr0 + x4, tmp2, xmask)
    tl.store(out_ptr1 + x4, tmp3, xmask)
    tmp5 = 4096.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.rsqrt(tmp8)
    tl.store(out_ptr2 + x4, tmp9, xmask)


# Group norm affine + ReLU; group size 4096, 2048 channels.
@triton.jit
def triton_poi_fused_native_group_norm_relu_29(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 2048
    x2 = xindex // 131072
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 4096.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


# Two-pass row standardization: 512 rows of 2048 values (same scheme as _23).
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 512
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
            eviction_policy='evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 2048.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, xmask)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
            eviction_policy='evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)


# In-place elementwise residual add (operand load order differs from _27;
# result is the same sum).
@triton.jit
def triton_poi_fused_add_31(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, None)


# Two-pass row standardization, rows of 2048; xnumel is not masked here
# (grid exactly covers the rows — codegen dropped the x bound check).
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 2048.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, None)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
            'evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)


# Two-pass row standardization: 1024 rows of 2048 values (masked variant).
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_33(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 1024
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
            eviction_policy='evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 2048.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, xmask)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
            eviction_policy='evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)


# Streaming group-norm statistics over 2048 elements per (sample, group).
@triton.jit
def triton_red_fused_native_group_norm_34(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 128
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 32
    x1 = xindex // 32
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    x4 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex % 32
        r3 = rindex // 32
        tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1),
            rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tl.store(out_ptr0 + x4, tmp2, xmask)
    tl.store(out_ptr1 + x4, tmp3, xmask)
    tmp5 = 2048.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.rsqrt(tmp8)
    tl.store(out_ptr2 + x4, tmp9, xmask)


# Group norm affine + ReLU; group size 2048, 1024 channels.
@triton.jit
def triton_poi_fused_native_group_norm_relu_35(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 1024
    x2 = xindex // 65536
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 2048.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


# Two-pass row standardization: 1024 rows of 9216 values.
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_36(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 1024
    rnumel = 9216
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask,
            eviction_policy='evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 9216.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, xmask)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask,
            eviction_policy='evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 9216 * x0), tmp12, rmask & xmask)


# Single-block group-norm statistics over 512 elements per (sample, group).
@triton.jit
def triton_per_fused_native_group_norm_37(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex % 32
    r3 = rindex // 32
    x0 = xindex % 32
    x1 = xindex // 32
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 512, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 512.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-05
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.rsqrt(tmp17)
    tl.store(out_ptr2 + x4, tmp18, None)
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)


# Group norm affine + ReLU; group size 512, 1024 channels.
@triton.jit
def triton_poi_fused_native_group_norm_relu_38(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 1024
    x2 = xindex // 16384
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 512.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


# Single-block row standardization over 1024 values per row.
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_39(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 1024.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-10
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = tmp0 - tmp8
    tmp20 = tmp19 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp18, None)
    tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)


# In-place elementwise residual add.
@triton.jit
def triton_poi_fused_add_40(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = tl.load(in_out_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, None)


# Streaming group-norm statistics over 2048 elements (128-channel groups).
@triton.jit
def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 128
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 32
    x1 = xindex // 32
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    x4 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex % 128
        r3 = rindex // 128
        tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1),
            rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tl.store(out_ptr0 + x4, tmp2, xmask)
    tl.store(out_ptr1 + x4, tmp3, xmask)
    tmp5 = 2048.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.rsqrt(tmp8)
    tl.store(out_ptr2 + x4, tmp9, xmask)


# Group norm affine + ReLU; group size 2048, 4096 channels.
@triton.jit
def triton_poi_fused_native_group_norm_relu_42(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 4096
    x2 = xindex // 65536
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 128), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 2048.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


# Two-pass row standardization: 1024 rows of 4096 values.
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 1024
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
            eviction_policy='evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 4096.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, xmask)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
            eviction_policy='evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask)


# In-place elementwise residual add.
@triton.jit
def triton_poi_fused_add_44(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, None)


# Two-pass row standardization, rows of 4096 (unmasked x variant).
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_45(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 4096.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, None)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
            'evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask)


# Identical body to _45; inductor emits one kernel per call site.
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_46(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 4096.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, None)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
            'evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask)


# Single-block group-norm statistics over 1024 elements per (sample, group).
@triton.jit
def triton_per_fused_native_group_norm_47(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex % 64
    r3 = rindex // 64
    x0 = xindex % 32
    x1 = xindex // 32
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 32768 * x1), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 1024.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-05
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.rsqrt(tmp17)
    tl.store(out_ptr2 + x4, tmp18, None)
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)


# Group norm affine + ReLU; group size 1024, 2048 channels.
@triton.jit
def triton_poi_fused_native_group_norm_relu_48(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 2048
    x2 = xindex // 32768
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 1024.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


# Two-pass row standardization, rows of 18432 values (unmasked x variant).
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_49(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 18432
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 18432 * x0), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 18432.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, None)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 18432 * x0), rmask, eviction_policy
            ='evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 18432 * x0), tmp12, rmask)


# Single-block group-norm statistics over 256 elements per (sample, group).
@triton.jit
def triton_per_fused_native_group_norm_50(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex % 64
    r3 = rindex // 64
    x0 = xindex % 32
    x1 = xindex // 32
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 8192 * x1), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 256, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 256.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-05
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.rsqrt(tmp17)
    tl.store(out_ptr2 + x4, tmp18, None)
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)


# Group norm affine + ReLU; group size 256, 2048 channels.
@triton.jit
def triton_poi_fused_native_group_norm_relu_51(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 2048
    x2 = xindex // 8192
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 256.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


# Two-pass row standardization, rows of 2048 (unmasked x variant).
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_52(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 2048.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, None)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
            'evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)


# In-place elementwise residual add.
@triton.jit
def triton_poi_fused_add_53(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = tl.load(in_out_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, None)


# Single-block group-norm statistics over 1024 elements (256-channel groups).
@triton.jit
def triton_per_fused_native_group_norm_54(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex % 256
    r3 = rindex // 256
    x0 = xindex % 32
    x1 = xindex // 32
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 1024.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-05
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.rsqrt(tmp17)
    tl.store(out_ptr2 + x4, tmp18, None)
    tl.store(out_ptr0 + x4, tmp8, None)
    tl.store(out_ptr1 + x4, tmp13, None)


# Group norm affine + ReLU; group size 1024, 8192 channels.
@triton.jit
def triton_poi_fused_native_group_norm_relu_55(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 8192
    x2 = xindex // 32768
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 256), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 256), None, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 1024.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + x3, tmp15, None)


# Two-pass row standardization, rows of 8192 (unmasked x variant).
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_56(in_out_ptr0, in_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy=
            'evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
            welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
            )
        tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
        tmp2_m2, tmp2_weight, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4_tmp[:, None]
    tmp5 = 8192.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, None)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy=
            'evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + 8192 * x0), tmp12, rmask)


# In-place elementwise residual add.
@triton.jit
def triton_poi_fused_add_57(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, None)


# Single-block group-norm statistics (in-place variant) — continues past the
# end of this chunk; the trailing assignment is completed on the next line of
# the file.
@triton.jit
def triton_per_fused_native_group_norm_58(in_out_ptr0, in_ptr0, out_ptr0,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex % 256
    r3 = rindex // 256
    x0 = xindex % 32
    x1 = xindex // 32
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 =
(triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 8192 * x0), tmp12, rmask) @triton.jit def triton_poi_fused_add_57(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_58(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 256 r3 = rindex // 256 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = 
triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) @triton.jit def triton_poi_fused_mean_native_group_norm_relu_59(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8192 x1 = xindex // 8192 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32768 * x1), None) tmp1 = tl.load(in_ptr1 + x2 // 256, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2 // 256, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (8192 + x0 + 32768 * x1), None) tmp18 = tl.load(in_ptr0 + (16384 + x0 + 32768 * x1), None) tmp25 = tl.load(in_ptr0 + (24576 + x0 + 32768 * x1), None) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = tmp11 - tmp1 tmp13 = tmp12 * tmp3 tmp14 = tmp13 * tmp5 tmp15 = tmp14 + tmp7 tmp16 = triton_helpers.maximum(tmp9, tmp15) tmp17 = tmp10 + tmp16 tmp19 = tmp18 - tmp1 tmp20 = tmp19 * tmp3 tmp21 = tmp20 * tmp5 tmp22 = tmp21 + tmp7 tmp23 = triton_helpers.maximum(tmp9, tmp22) tmp24 = tmp17 + tmp23 tmp26 = tmp25 - tmp1 tmp27 = tmp26 * tmp3 tmp28 = tmp27 * tmp5 tmp29 = tmp28 + tmp7 tmp30 = triton_helpers.maximum(tmp9, tmp29) tmp31 = tmp24 + tmp30 tmp32 = 4.0 tmp33 = tmp31 / tmp32 tl.store(out_ptr0 + x2, tmp33, None) @triton.jit def triton_poi_fused_convolution_60(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 87372 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 21843 tmp0 = 
tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, 
primals_149, primals_150, primals_151, primals_152, primals_153, primals_154) = args args.clear() assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256,), (1,)) assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_6, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256,), (1,)) assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_10, (256,), (1,)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_13, (1024,), (1,)) assert_size_stride(primals_14, (1024,), (1,)) assert_size_stride(primals_15, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_16, (256,), (1,)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (256,), (1,)) assert_size_stride(primals_20, (256,), (1,)) assert_size_stride(primals_21, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_22, (1024,), (1,)) assert_size_stride(primals_23, (1024,), (1,)) assert_size_stride(primals_24, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_25, (256,), (1,)) assert_size_stride(primals_26, (256,), (1,)) assert_size_stride(primals_27, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_28, (256,), (1,)) assert_size_stride(primals_29, (256,), (1,)) assert_size_stride(primals_30, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_31, (1024,), (1,)) assert_size_stride(primals_32, (1024,), (1,)) assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_34, (256,), (1,)) assert_size_stride(primals_35, (256,), (1,)) assert_size_stride(primals_36, (256, 256, 3, 3), 
(2304, 9, 3, 1)) assert_size_stride(primals_37, (256,), (1,)) assert_size_stride(primals_38, (256,), (1,)) assert_size_stride(primals_39, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_40, (1024,), (1,)) assert_size_stride(primals_41, (1024,), (1,)) assert_size_stride(primals_42, (2048, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_43, (512, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_44, (512,), (1,)) assert_size_stride(primals_45, (512,), (1,)) assert_size_stride(primals_46, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_47, (512,), (1,)) assert_size_stride(primals_48, (512,), (1,)) assert_size_stride(primals_49, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_50, (2048,), (1,)) assert_size_stride(primals_51, (2048,), (1,)) assert_size_stride(primals_52, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_53, (512,), (1,)) assert_size_stride(primals_54, (512,), (1,)) assert_size_stride(primals_55, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_56, (512,), (1,)) assert_size_stride(primals_57, (512,), (1,)) assert_size_stride(primals_58, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_59, (2048,), (1,)) assert_size_stride(primals_60, (2048,), (1,)) assert_size_stride(primals_61, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_62, (512,), (1,)) assert_size_stride(primals_63, (512,), (1,)) assert_size_stride(primals_64, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_65, (512,), (1,)) assert_size_stride(primals_66, (512,), (1,)) assert_size_stride(primals_67, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_68, (2048,), (1,)) assert_size_stride(primals_69, (2048,), (1,)) assert_size_stride(primals_70, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_71, (512,), (1,)) assert_size_stride(primals_72, (512,), (1,)) assert_size_stride(primals_73, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_74, 
(512,), (1,)) assert_size_stride(primals_75, (512,), (1,)) assert_size_stride(primals_76, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_77, (2048,), (1,)) assert_size_stride(primals_78, (2048,), (1,)) assert_size_stride(primals_79, (4096, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_80, (1024, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_81, (1024,), (1,)) assert_size_stride(primals_82, (1024,), (1,)) assert_size_stride(primals_83, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_84, (1024,), (1,)) assert_size_stride(primals_85, (1024,), (1,)) assert_size_stride(primals_86, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_87, (4096,), (1,)) assert_size_stride(primals_88, (4096,), (1,)) assert_size_stride(primals_89, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_90, (1024,), (1,)) assert_size_stride(primals_91, (1024,), (1,)) assert_size_stride(primals_92, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_93, (1024,), (1,)) assert_size_stride(primals_94, (1024,), (1,)) assert_size_stride(primals_95, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_96, (4096,), (1,)) assert_size_stride(primals_97, (4096,), (1,)) assert_size_stride(primals_98, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_99, (1024,), (1,)) assert_size_stride(primals_100, (1024,), (1,)) assert_size_stride(primals_101, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_102, (1024,), (1,)) assert_size_stride(primals_103, (1024,), (1,)) assert_size_stride(primals_104, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_105, (4096,), (1,)) assert_size_stride(primals_106, (4096,), (1,)) assert_size_stride(primals_107, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_108, (1024,), (1,)) assert_size_stride(primals_109, (1024,), (1,)) assert_size_stride(primals_110, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_111, 
(1024,), (1,)) assert_size_stride(primals_112, (1024,), (1,)) assert_size_stride(primals_113, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_114, (4096,), (1,)) assert_size_stride(primals_115, (4096,), (1,)) assert_size_stride(primals_116, (8192, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_117, (2048, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_118, (2048,), (1,)) assert_size_stride(primals_119, (2048,), (1,)) assert_size_stride(primals_120, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_121, (2048,), (1,)) assert_size_stride(primals_122, (2048,), (1,)) assert_size_stride(primals_123, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_124, (8192,), (1,)) assert_size_stride(primals_125, (8192,), (1,)) assert_size_stride(primals_126, (2048, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_127, (2048,), (1,)) assert_size_stride(primals_128, (2048,), (1,)) assert_size_stride(primals_129, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_130, (2048,), (1,)) assert_size_stride(primals_131, (2048,), (1,)) assert_size_stride(primals_132, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_133, (8192,), (1,)) assert_size_stride(primals_134, (8192,), (1,)) assert_size_stride(primals_135, (2048, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_136, (2048,), (1,)) assert_size_stride(primals_137, (2048,), (1,)) assert_size_stride(primals_138, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_139, (2048,), (1,)) assert_size_stride(primals_140, (2048,), (1,)) assert_size_stride(primals_141, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_142, (8192,), (1,)) assert_size_stride(primals_143, (8192,), (1,)) assert_size_stride(primals_144, (2048, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_145, (2048,), (1,)) assert_size_stride(primals_146, (2048,), (1,)) assert_size_stride(primals_147, (2048, 2048, 3, 3), (18432, 9, 3, 1)) 
assert_size_stride(primals_148, (2048,), (1,)) assert_size_stride(primals_149, (2048,), (1,)) assert_size_stride(primals_150, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_151, (8192,), (1,)) assert_size_stride(primals_152, (8192,), (1,)) assert_size_stride(primals_153, (21843, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_154, (21843,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_9, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_9 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_18, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_27, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_27 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_36, buf5, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_36 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_46, buf6, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_46 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) 
triton_poi_fused_3[grid(262144, 9)](primals_55, buf7, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_55 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_64, buf8, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_64 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_73, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_73 buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_83, buf10, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_83 buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_92, buf11, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_92 buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_101, buf12, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_101 buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_110, buf13, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_110 buf14 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_poi_fused_5[grid(4194304, 9)](primals_120, buf14, 4194304, 9, XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1) del primals_120 buf15 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_poi_fused_5[grid(4194304, 9)](primals_129, buf15, 4194304, 9, XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1) del primals_129 buf16 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_poi_fused_5[grid(4194304, 
9)](primals_138, buf16, 4194304, 9, XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1) del primals_138 buf17 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_poi_fused_5[grid(4194304, 9)](primals_147, buf17, 4194304, 9, XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1) del primals_147 buf19 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf21 = reinterpret_tensor(buf19, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf19 buf22 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) triton_per_fused_add_div_sqrt_sub_var_mean_6[grid(256)](buf21, buf0, buf22, 256, 147, XBLOCK=1, num_warps=2, num_stages=1) buf23 = extern_kernels.convolution(buf1, buf22, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf24 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256), torch.float32) triton_poi_fused_constant_pad_nd_7[grid(1183744)](buf23, buf24, 1183744, XBLOCK=1024, num_warps=4, num_stages=1) buf25 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) buf26 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(262144)](buf24, buf25, buf26, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf27 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf28 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf30 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_9[grid(128)](buf25, buf27, buf28, buf30, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf31 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf25, buf27, buf28, primals_3, primals_4, buf31, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_4 buf33 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf35 = reinterpret_tensor(buf33, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf33 buf36 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf35, primals_5, buf36, 1024, 256, num_warps=2, num_stages=1) buf37 = extern_kernels.convolution(buf31, buf36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf39 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf41 = reinterpret_tensor(buf39, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf39 buf42 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_12[grid(256)](buf41, primals_6, buf42, 256, 256, num_warps=2, num_stages=1) buf43 = extern_kernels.convolution(buf31, buf42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf44 = buf28 del buf28 buf45 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf47 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_9[grid(128)](buf43, buf44, buf45, buf47, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf48 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf43, buf44, buf45, primals_7, primals_8, buf48, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_8 buf50 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf52 = reinterpret_tensor(buf50, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf50 buf53 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_13[grid(256)](buf52, buf2, buf53, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf54 = extern_kernels.convolution(buf48, buf53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf55 = buf45 del buf45 buf56 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf58 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_9[grid(128)](buf54, buf55, buf56, buf58, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf59 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf54, buf55, buf56, primals_10, primals_11, buf59, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_11 buf61 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf63 = reinterpret_tensor(buf61, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf61 buf64 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf63, primals_12, buf64, 1024, 256, num_warps=2, num_stages=1) buf65 = extern_kernels.convolution(buf59, buf64, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf66 = buf37 del buf37 triton_poi_fused_add_14[grid(1048576)](buf66, buf65, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf67 = buf56 del buf56 buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf70 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_15[grid(128)](buf66, buf67, buf68, buf70, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf71 = buf65 del buf65 triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf66, buf67, buf68, primals_13, primals_14, buf71, 1048576, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_14 buf73 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf75 = reinterpret_tensor(buf73, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf73 buf76 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf75, primals_15, buf76, 256, 1024, num_warps=8, num_stages=1) buf77 = extern_kernels.convolution(buf71, buf76, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf77, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf78 = buf68 del buf68 buf79 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf81 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_9[grid(128)](buf77, buf78, buf79, buf81, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf82 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf77, buf78, buf79, primals_16, primals_17, buf82, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_17 buf84 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf86 = reinterpret_tensor(buf84, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf84 buf87 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_13[grid(256)](buf86, buf3, buf87, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf88 = extern_kernels.convolution(buf82, buf87, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf88, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf89 = buf79 del buf79 buf90 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf92 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_9[grid(128)](buf88, buf89, buf90, buf92, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf93 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf88, buf89, buf90, primals_19, primals_20, buf93, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_20 buf95 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf97 = reinterpret_tensor(buf95, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf95 buf98 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf97, primals_21, buf98, 1024, 256, num_warps=2, num_stages=1) buf99 = extern_kernels.convolution(buf93, buf98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf100 = buf99 del buf99 triton_poi_fused_add_18[grid(1048576)](buf100, buf66, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf101 = buf90 del buf90 buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf104 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf100, buf101, buf102, buf104, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf105 = reinterpret_tensor(buf23, (4, 1024, 16, 16), (262144, 1, 16384, 1024), 0) del buf23 triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf100, buf101, buf102, primals_22, primals_23, buf105, 1048576, XBLOCK =1024, num_warps=4, num_stages=1) del primals_23 buf107 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf109 = reinterpret_tensor(buf107, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf107 buf110 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) 
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf109, primals_24, buf110, 256, 1024, num_warps=8, num_stages=1) buf111 = extern_kernels.convolution(buf105, buf110, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf111, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf112 = buf102 del buf102 buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf115 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_9[grid(128)](buf111, buf112, buf113, buf115, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf116 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf111, buf112, buf113, primals_25, primals_26, buf116, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_26 buf118 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf120 = reinterpret_tensor(buf118, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf118 buf121 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_13[grid(256)](buf120, buf4, buf121, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf122 = extern_kernels.convolution(buf116, buf121, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf122, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf123 = buf113 del buf113 buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf126 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_9[grid(128)](buf122, buf123, buf124, buf126, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf127 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) 
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf122, buf123, buf124, primals_28, primals_29, buf127, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_29 buf129 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf131 = reinterpret_tensor(buf129, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf129 buf132 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf131, primals_30, buf132, 1024, 256, num_warps=2, num_stages=1) buf133 = extern_kernels.convolution(buf127, buf132, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf133, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf134 = buf133 del buf133 triton_poi_fused_add_18[grid(1048576)](buf134, buf100, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf135 = buf124 del buf124 buf136 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf138 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf134, buf135, buf136, buf138, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf139 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf134, buf135, buf136, primals_31, primals_32, buf139, 1048576, XBLOCK =1024, num_warps=4, num_stages=1) del primals_32 buf141 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf143 = reinterpret_tensor(buf141, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf141 buf144 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf143, primals_33, buf144, 256, 1024, num_warps=8, num_stages=1) buf145 = extern_kernels.convolution(buf139, buf144, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf145, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf146 = buf136 del buf136 buf147 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf149 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_9[grid(128)](buf145, buf146, buf147, buf149, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf150 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf145, buf146, buf147, primals_34, primals_35, buf150, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_35 buf152 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf154 = reinterpret_tensor(buf152, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf152 buf155 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_13[grid(256)](buf154, buf5, buf155, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf156 = extern_kernels.convolution(buf150, buf155, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf156, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf157 = buf147 del buf147 buf158 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf160 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_9[grid(128)](buf156, buf157, buf158, buf160, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf161 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf156, buf157, buf158, primals_37, primals_38, buf161, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_38 buf163 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf165 = 
reinterpret_tensor(buf163, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf163 buf166 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf165, primals_39, buf166, 1024, 256, num_warps=2, num_stages=1) buf167 = extern_kernels.convolution(buf161, buf166, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf167, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf168 = buf167 del buf167 triton_poi_fused_add_18[grid(1048576)](buf168, buf134, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf169 = buf158 del buf158 buf170 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf172 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf168, buf169, buf170, buf172, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf173 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf168, buf169, buf170, primals_40, primals_41, buf173, 1048576, XBLOCK =1024, num_warps=4, num_stages=1) del primals_41 buf175 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf177 = reinterpret_tensor(buf175, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf175 buf178 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf177, primals_42, buf178, 2048, 1024, num_warps=8, num_stages=1) buf179 = extern_kernels.convolution(buf173, buf178, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf179, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0) del 
buf181 buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_20[grid(512)](buf183, primals_43, buf184, 512, 1024, num_warps=8, num_stages=1) buf185 = extern_kernels.convolution(buf173, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf185, (4, 512, 16, 16), (131072, 1, 8192, 512)) buf186 = buf170 del buf170 buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_21[grid(128)](buf185, buf186, buf187, buf189, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf190 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512 ), torch.float32) triton_poi_fused_native_group_norm_relu_22[grid(524288)](buf185, buf186, buf187, primals_44, primals_45, buf190, 524288, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_45 buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf192 buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_23[grid(512)](buf194, buf6, buf195, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf197 = buf187 del buf187 buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf196, buf197, buf198, buf200, 128, 1024, num_warps=8, num_stages=1) buf201 = empty_strided_cuda((4, 512, 8, 8), 
(32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf196, buf197, buf198, primals_47, primals_48, buf201, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_48 buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf203 buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf205, primals_49, buf206, 2048, 512, num_warps=4, num_stages=1) buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf208 = buf179 del buf179 triton_poi_fused_add_27[grid(524288)](buf208, buf207, 524288, XBLOCK=1024, num_warps=4, num_stages=1) buf209 = buf198 del buf198 buf210 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf212 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf208, buf209, buf210, buf212, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf213 = buf207 del buf207 triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf208, buf209, buf210, primals_50, primals_51, buf213, 524288, XBLOCK= 512, num_warps=8, num_stages=1) del primals_51 buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf215 buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf217, primals_52, buf218, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf220 = buf210 del buf210 buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf219, buf220, buf221, buf223, 128, 1024, num_warps=8, num_stages=1) buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf219, buf220, buf221, primals_53, primals_54, buf224, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_54 buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf226 buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_23[grid(512)](buf228, buf7, buf229, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf231 = buf221 del buf221 buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf230, buf231, buf232, buf234, 128, 1024, num_warps=8, num_stages=1) buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf230, buf231, buf232, primals_56, primals_57, buf235, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_57 buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del 
buf237 buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf239, primals_58, buf240, 2048, 512, num_warps=4, num_stages=1) buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf242 = buf241 del buf241 triton_poi_fused_add_31[grid(524288)](buf242, buf208, 524288, XBLOCK=1024, num_warps=4, num_stages=1) buf243 = buf232 del buf232 buf244 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf246 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf242, buf243, buf244, buf246, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf247 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf242, buf243, buf244, primals_59, primals_60, buf247, 524288, XBLOCK= 512, num_warps=8, num_stages=1) del primals_60 buf249 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf251 = reinterpret_tensor(buf249, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf249 buf252 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf251, primals_61, buf252, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf253 = extern_kernels.convolution(buf247, buf252, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf253, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf254 = buf244 del buf244 buf255 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf257 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) 
triton_per_fused_native_group_norm_24[grid(128)](buf253, buf254, buf255, buf257, 128, 1024, num_warps=8, num_stages=1) buf258 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf253, buf254, buf255, primals_62, primals_63, buf258, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_63 buf260 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf262 = reinterpret_tensor(buf260, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf260 buf263 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_23[grid(512)](buf262, buf8, buf263, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf264 = extern_kernels.convolution(buf258, buf263, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf264, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf265 = buf255 del buf255 buf266 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf268 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf264, buf265, buf266, buf268, 128, 1024, num_warps=8, num_stages=1) buf269 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf264, buf265, buf266, primals_65, primals_66, buf269, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_66 buf271 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf273 = reinterpret_tensor(buf271, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf271 buf274 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf273, primals_67, buf274, 2048, 512, num_warps=4, num_stages=1) buf275 = extern_kernels.convolution(buf269, buf274, stride=(1, 1), padding=(0, 
0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf275, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf276 = buf275 del buf275 triton_poi_fused_add_31[grid(524288)](buf276, buf242, 524288, XBLOCK=1024, num_warps=4, num_stages=1) buf277 = buf266 del buf266 buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf280 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf276, buf277, buf278, buf280, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf281 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf276, buf277, buf278, primals_68, primals_69, buf281, 524288, XBLOCK= 512, num_warps=8, num_stages=1) del primals_69 buf283 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf285 = reinterpret_tensor(buf283, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf283 buf286 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf285, primals_70, buf286, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf287 = extern_kernels.convolution(buf281, buf286, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf287, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf288 = buf278 del buf278 buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf291 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf287, buf288, buf289, buf291, 128, 1024, num_warps=8, num_stages=1) buf292 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf287, buf288, buf289, primals_71, primals_72, buf292, 131072, 
XBLOCK= 512, num_warps=8, num_stages=1) del primals_72 buf294 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf296 = reinterpret_tensor(buf294, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf294 buf297 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_23[grid(512)](buf296, buf9, buf297, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf298 = extern_kernels.convolution(buf292, buf297, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf298, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf299 = buf289 del buf289 buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf302 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf298, buf299, buf300, buf302, 128, 1024, num_warps=8, num_stages=1) buf303 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf298, buf299, buf300, primals_74, primals_75, buf303, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_75 buf305 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf307 = reinterpret_tensor(buf305, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf305 buf308 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf307, primals_76, buf308, 2048, 512, num_warps=4, num_stages=1) buf309 = extern_kernels.convolution(buf303, buf308, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf309, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf310 = buf309 del buf309 triton_poi_fused_add_31[grid(524288)](buf310, buf276, 524288, XBLOCK=1024, num_warps=4, num_stages=1) buf311 = buf300 del buf300 buf312 = 
empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf314 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf310, buf311, buf312, buf314, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf315 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf310, buf311, buf312, primals_77, primals_78, buf315, 524288, XBLOCK= 512, num_warps=8, num_stages=1) del primals_78 buf317 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf319 = reinterpret_tensor(buf317, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf317 buf320 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf319, primals_79, buf320, 4096, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf321 = extern_kernels.convolution(buf315, buf320, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf321, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf323 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf325 = reinterpret_tensor(buf323, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf323 buf326 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_33[grid(1024)](buf325, primals_80, buf326, 1024, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf327 = extern_kernels.convolution(buf315, buf326, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf327, (4, 1024, 8, 8), (65536, 1, 8192, 1024)) buf328 = buf312 del buf312 buf329 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf331 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) 
triton_red_fused_native_group_norm_34[grid(128)](buf327, buf328, buf329, buf331, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf332 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_35[grid(262144)](buf327, buf328, buf329, primals_81, primals_82, buf332, 262144, XBLOCK= 512, num_warps=8, num_stages=1) del primals_82 buf334 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf336 = reinterpret_tensor(buf334, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf334 buf337 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_36[grid(1024)](buf336, buf10, buf337, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf338 = extern_kernels.convolution(buf332, buf337, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf338, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf339 = buf329 del buf329 buf340 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf342 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf338, buf339, buf340, buf342, 128, 512, num_warps=4, num_stages=1) buf343 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf338, buf339, buf340, primals_84, primals_85, buf343, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_85 buf345 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf347 = reinterpret_tensor(buf345, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf345 buf348 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf347, primals_86, buf348, 4096, 1024, num_warps=8, num_stages=1) buf349 = 
extern_kernels.convolution(buf343, buf348, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf349, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf350 = buf321 del buf321 triton_poi_fused_add_40[grid(262144)](buf350, buf349, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf351 = buf340 del buf340 buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf350, buf351, buf352, buf354, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf355 = buf349 del buf349 triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf350, buf351, buf352, primals_87, primals_88, buf355, 262144, XBLOCK= 512, num_warps=8, num_stages=1) del primals_88 buf357 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf359 = reinterpret_tensor(buf357, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf357 buf360 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf359, primals_89, buf360, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf361 = extern_kernels.convolution(buf355, buf360, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf361, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf362 = buf352 del buf352 buf363 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf365 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf361, buf362, buf363, buf365, 128, 512, num_warps=4, num_stages=1) buf366 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf361, buf362, buf363, primals_90, primals_91, 
buf366, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_91 buf368 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf370 = reinterpret_tensor(buf368, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf368 buf371 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_36[grid(1024)](buf370, buf11, buf371, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf372 = extern_kernels.convolution(buf366, buf371, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf372, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf373 = buf363 del buf363 buf374 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf376 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf372, buf373, buf374, buf376, 128, 512, num_warps=4, num_stages=1) buf377 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf372, buf373, buf374, primals_93, primals_94, buf377, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_94 buf379 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf381 = reinterpret_tensor(buf379, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf379 buf382 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf381, primals_95, buf382, 4096, 1024, num_warps=8, num_stages=1) buf383 = extern_kernels.convolution(buf377, buf382, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf383, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf384 = buf383 del buf383 triton_poi_fused_add_44[grid(262144)](buf384, buf350, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf385 = 
buf374 del buf374 buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf384, buf385, buf386, buf388, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf384, buf385, buf386, primals_96, primals_97, buf389, 262144, XBLOCK= 512, num_warps=8, num_stages=1) del primals_97 buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf391 buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf393, primals_98, buf394, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf396 = buf386 del buf386 buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf395, buf396, buf397, buf399, 128, 512, num_warps=4, num_stages=1) buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf395, buf396, buf397, primals_99, primals_100, buf400, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_100 buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf402 buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 
1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_36[grid(1024)](buf404, buf12, buf405, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf407 = buf397 del buf397 buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf406, buf407, buf408, buf410, 128, 512, num_warps=4, num_stages=1) buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf406, buf407, buf408, primals_102, primals_103, buf411, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_103 buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf413 buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf415, primals_104, buf416, 4096, 1024, num_warps=8, num_stages=1) buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf418 = buf417 del buf417 triton_poi_fused_add_44[grid(262144)](buf418, buf384, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf419 = buf408 del buf408 buf420 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf422 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf418, buf419, buf420, buf422, 128, 2048, XBLOCK=1, RBLOCK=2048, 
num_warps=16, num_stages=1) buf423 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf418, buf419, buf420, primals_105, primals_106, buf423, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_106 buf425 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf427 = reinterpret_tensor(buf425, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf425 buf428 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf427, primals_107, buf428, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf429 = extern_kernels.convolution(buf423, buf428, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf429, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf430 = buf420 del buf420 buf431 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf433 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf429, buf430, buf431, buf433, 128, 512, num_warps=4, num_stages=1) buf434 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf429, buf430, buf431, primals_108, primals_109, buf434, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_109 buf436 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf438 = reinterpret_tensor(buf436, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf436 buf439 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_36[grid(1024)](buf438, buf13, buf439, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf440 = extern_kernels.convolution(buf434, buf439, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf440, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf441 = buf431 del buf431 buf442 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf444 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf440, buf441, buf442, buf444, 128, 512, num_warps=4, num_stages=1) buf445 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf440, buf441, buf442, primals_111, primals_112, buf445, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_112 buf447 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf449 = reinterpret_tensor(buf447, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf447 buf450 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf449, primals_113, buf450, 4096, 1024, num_warps=8, num_stages=1) buf451 = extern_kernels.convolution(buf445, buf450, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf451, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf452 = buf451 del buf451 triton_poi_fused_add_44[grid(262144)](buf452, buf418, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf453 = buf442 del buf442 buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf456 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf452, buf453, buf454, buf456, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf457 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf452, buf453, buf454, primals_114, primals_115, buf457, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_115 
buf459 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf461 = reinterpret_tensor(buf459, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf459 buf462 = empty_strided_cuda((8192, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_45[grid(8192)](buf461, primals_116, buf462, 8192, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf463 = extern_kernels.convolution(buf457, buf462, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf463, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf465 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf467 = reinterpret_tensor(buf465, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf465 buf468 = empty_strided_cuda((2048, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_46[grid(2048)](buf467, primals_117, buf468, 2048, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf469 = extern_kernels.convolution(buf457, buf468, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf469, (4, 2048, 4, 4), (32768, 1, 8192, 2048)) buf470 = buf454 del buf454 buf471 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf473 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_47[grid(128)](buf469, buf470, buf471, buf473, 128, 1024, num_warps=8, num_stages=1) buf474 = empty_strided_cuda((4, 2048, 4, 4), (32768, 1, 8192, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_48[grid(131072)](buf469, buf470, buf471, primals_118, primals_119, buf474, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_119 buf476 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf478 = reinterpret_tensor(buf476, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf476 buf479 = 
empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_49[grid(2048)](buf478, buf14, buf479, 2048, 18432, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf480 = extern_kernels.convolution(buf474, buf479, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf480, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf481 = buf471 del buf471 buf482 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf484 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf480, buf481, buf482, buf484, 128, 256, num_warps=2, num_stages=1) buf485 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf480, buf481, buf482, primals_121, primals_122, buf485, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_122 buf487 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf489 = reinterpret_tensor(buf487, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf487 buf490 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf489, primals_123, buf490, 8192, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf491 = extern_kernels.convolution(buf485, buf490, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf491, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf492 = buf463 del buf463 triton_poi_fused_add_53[grid(131072)](buf492, buf491, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf493 = buf482 del buf482 buf494 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf496 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) 
triton_per_fused_native_group_norm_54[grid(128)](buf492, buf493, buf494, buf496, 128, 1024, num_warps=8, num_stages=1) buf497 = buf491 del buf491 triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf492, buf493, buf494, primals_124, primals_125, buf497, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_125 buf499 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf501 = reinterpret_tensor(buf499, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf499 buf502 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf501, primals_126, buf502, 2048, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf503 = extern_kernels.convolution(buf497, buf502, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf503, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf504 = buf494 del buf494 buf505 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf507 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf503, buf504, buf505, buf507, 128, 256, num_warps=2, num_stages=1) buf508 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf503, buf504, buf505, primals_127, primals_128, buf508, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_128 buf510 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf512 = reinterpret_tensor(buf510, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf510 buf513 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_49[grid(2048)](buf512, buf15, buf513, 2048, 18432, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf514 = extern_kernels.convolution(buf508, buf513, stride=(1, 1), padding=(1, 1), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf514, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf515 = buf505 del buf505 buf516 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf518 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf514, buf515, buf516, buf518, 128, 256, num_warps=2, num_stages=1) buf519 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf514, buf515, buf516, primals_130, primals_131, buf519, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_131 buf521 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf523 = reinterpret_tensor(buf521, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf521 buf524 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf523, primals_132, buf524, 8192, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf525 = extern_kernels.convolution(buf519, buf524, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf525, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf526 = buf525 del buf525 triton_poi_fused_add_57[grid(131072)](buf526, buf492, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf527 = buf516 del buf516 buf528 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf530 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_54[grid(128)](buf526, buf527, buf528, buf530, 128, 1024, num_warps=8, num_stages=1) buf531 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192 ), torch.float32) triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf526, buf527, buf528, primals_133, primals_134, buf531, 131072, XBLOCK=1024, 
num_warps=4, num_stages=1) del primals_134 buf533 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf535 = reinterpret_tensor(buf533, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf533 buf536 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf535, primals_135, buf536, 2048, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf537 = extern_kernels.convolution(buf531, buf536, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf537, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf538 = buf528 del buf528 buf539 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf541 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf537, buf538, buf539, buf541, 128, 256, num_warps=2, num_stages=1) buf542 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf537, buf538, buf539, primals_136, primals_137, buf542, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_137 buf544 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf546 = reinterpret_tensor(buf544, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf544 buf547 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_49[grid(2048)](buf546, buf16, buf547, 2048, 18432, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf548 = extern_kernels.convolution(buf542, buf547, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf548, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf549 = buf539 del buf539 buf550 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf552 = empty_strided_cuda((4, 32, 
1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf548, buf549, buf550, buf552, 128, 256, num_warps=2, num_stages=1) buf553 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf548, buf549, buf550, primals_139, primals_140, buf553, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_140 buf555 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf557 = reinterpret_tensor(buf555, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf555 buf558 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf557, primals_141, buf558, 8192, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf559 = extern_kernels.convolution(buf553, buf558, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf559, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf560 = buf559 del buf559 triton_poi_fused_add_57[grid(131072)](buf560, buf526, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf561 = buf550 del buf550 buf562 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf564 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_54[grid(128)](buf560, buf561, buf562, buf564, 128, 1024, num_warps=8, num_stages=1) buf565 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192 ), torch.float32) triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf560, buf561, buf562, primals_142, primals_143, buf565, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_143 buf567 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf569 = reinterpret_tensor(buf567, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf567 buf570 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32) 
triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf569, primals_144, buf570, 2048, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf571 = extern_kernels.convolution(buf565, buf570, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf571, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf572 = buf562 del buf562 buf573 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf575 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf571, buf572, buf573, buf575, 128, 256, num_warps=2, num_stages=1) buf576 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf571, buf572, buf573, primals_145, primals_146, buf576, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_146 buf578 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf580 = reinterpret_tensor(buf578, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf578 buf581 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_49[grid(2048)](buf580, buf17, buf581, 2048, 18432, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf582 = extern_kernels.convolution(buf576, buf581, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf582, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf583 = buf573 del buf573 buf584 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf586 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf582, buf583, buf584, buf586, 128, 256, num_warps=2, num_stages=1) buf587 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) 
triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf582, buf583, buf584, primals_148, primals_149, buf587, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_149 buf589 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf591 = reinterpret_tensor(buf589, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf589 buf592 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf591, primals_150, buf592, 8192, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf593 = extern_kernels.convolution(buf587, buf592, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf593, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf594 = buf593 del buf593 triton_poi_fused_add_57[grid(131072)](buf594, buf560, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf595 = reinterpret_tensor(buf584, (4, 32, 1, 1), (32, 1, 32, 32), 0) del buf584 buf596 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf598 = reinterpret_tensor(buf596, (4, 32, 1, 1), (32, 1, 32, 32), 0) del buf596 triton_per_fused_native_group_norm_58[grid(128)](buf598, buf594, buf595, 128, 1024, num_warps=8, num_stages=1) buf599 = empty_strided_cuda((4, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32) triton_poi_fused_mean_native_group_norm_relu_59[grid(32768)](buf594, buf595, buf598, primals_151, primals_152, buf599, 32768, XBLOCK =256, num_warps=4, num_stages=1) buf600 = extern_kernels.convolution(buf599, primals_153, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf600, (4, 21843, 1, 1), (21843, 1, 21843, 21843)) buf601 = reinterpret_tensor(buf600, (4, 21843, 1, 1), (21843, 1, 87372, 87372), 0) del buf600 triton_poi_fused_convolution_60[grid(87372)](buf601, primals_154, 87372, XBLOCK=512, num_warps=8, num_stages=1) del primals_154 
return (reinterpret_tensor(buf601, (4, 21843), (21843, 1), 0), buf0, buf1, primals_3, primals_5, primals_6, primals_7, buf2, primals_10, primals_12, primals_13, primals_15, primals_16, buf3, primals_19, primals_21, primals_22, primals_24, primals_25, buf4, primals_28, primals_30, primals_31, primals_33, primals_34, buf5, primals_37, primals_39, primals_40, primals_42, primals_43, primals_44, buf6, primals_47, primals_49, primals_50, primals_52, primals_53, buf7, primals_56, primals_58, primals_59, primals_61, primals_62, buf8, primals_65, primals_67, primals_68, primals_70, primals_71, buf9, primals_74, primals_76, primals_77, primals_79, primals_80, primals_81, buf10, primals_84, primals_86, primals_87, primals_89, primals_90, buf11, primals_93, primals_95, primals_96, primals_98, primals_99, buf12, primals_102, primals_104, primals_105, primals_107, primals_108, buf13, primals_111, primals_113, primals_114, primals_116, primals_117, primals_118, buf14, primals_121, primals_123, primals_124, primals_126, primals_127, buf15, primals_130, primals_132, primals_133, primals_135, primals_136, buf16, primals_139, primals_141, primals_142, primals_144, primals_145, buf17, primals_148, primals_150, primals_151, primals_152, primals_153, buf21, buf22, buf24, buf25, buf26, reinterpret_tensor(buf27, (4, 32), (32, 1), 0), reinterpret_tensor(buf30, (4, 32), (32, 1), 0), buf31, buf35, buf36, buf41, buf42, buf43, reinterpret_tensor(buf44, (4, 32), (32, 1), 0), reinterpret_tensor(buf47, (4, 32), (32, 1), 0), buf48, buf52, buf53, buf54, reinterpret_tensor(buf55, (4, 32), (32, 1), 0), reinterpret_tensor(buf58, (4, 32), (32, 1), 0), buf59, buf63, buf64, buf66, reinterpret_tensor(buf67, (4, 32), (32, 1), 0), reinterpret_tensor(buf70, (4, 32), (32, 1), 0), buf71, buf75, buf76, buf77, reinterpret_tensor(buf78, (4, 32), (32, 1), 0), reinterpret_tensor(buf81, (4, 32), (32, 1), 0), buf82, buf86, buf87, buf88, reinterpret_tensor(buf89, (4, 32), (32, 1), 0), reinterpret_tensor(buf92, (4, 
32), (32, 1), 0), buf93, buf97, buf98, buf100, reinterpret_tensor(buf101, (4, 32), (32, 1), 0), reinterpret_tensor(buf104, (4, 32), (32, 1), 0), buf105, buf109, buf110, buf111, reinterpret_tensor(buf112, (4, 32), (32, 1), 0), reinterpret_tensor(buf115, (4, 32), (32, 1), 0), buf116, buf120, buf121, buf122, reinterpret_tensor(buf123, (4, 32), (32, 1), 0), reinterpret_tensor(buf126, (4, 32), (32, 1), 0), buf127, buf131, buf132, buf134, reinterpret_tensor(buf135, (4, 32), (32, 1), 0), reinterpret_tensor(buf138, (4, 32), (32, 1), 0), buf139, buf143, buf144, buf145, reinterpret_tensor(buf146, (4, 32), (32, 1), 0), reinterpret_tensor(buf149, (4, 32), (32, 1), 0), buf150, buf154, buf155, buf156, reinterpret_tensor(buf157, (4, 32), (32, 1), 0), reinterpret_tensor(buf160, (4, 32), (32, 1), 0), buf161, buf165, buf166, buf168, reinterpret_tensor(buf169, (4, 32), (32, 1), 0), reinterpret_tensor(buf172, (4, 32), (32, 1), 0), buf173, buf177, buf178, buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf208, reinterpret_tensor(buf209, (4, 32), (32, 1), 0), reinterpret_tensor(buf212, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf242, reinterpret_tensor(buf243, (4, 32), (32, 1), 0), reinterpret_tensor(buf246, (4, 32), (32, 1), 0), buf247, buf251, buf252, buf253, reinterpret_tensor(buf254, (4, 32), (32, 1), 0), reinterpret_tensor(buf257, (4, 32), (32, 1), 0), buf258, buf262, buf263, buf264, reinterpret_tensor(buf265, (4, 32), (32, 1), 0), reinterpret_tensor(buf268, (4, 32), (32, 1), 0), buf269, buf273, 
buf274, buf276, reinterpret_tensor(buf277, (4, 32), (32, 1), 0), reinterpret_tensor(buf280, (4, 32), (32, 1), 0), buf281, buf285, buf286, buf287, reinterpret_tensor(buf288, (4, 32), (32, 1), 0), reinterpret_tensor(buf291, (4, 32), (32, 1), 0), buf292, buf296, buf297, buf298, reinterpret_tensor(buf299, (4, 32), (32, 1), 0), reinterpret_tensor(buf302, (4, 32), (32, 1), 0), buf303, buf307, buf308, buf310, reinterpret_tensor(buf311, (4, 32), (32, 1), 0), reinterpret_tensor(buf314, (4, 32), (32, 1), 0), buf315, buf319, buf320, buf325, buf326, buf327, reinterpret_tensor( buf328, (4, 32), (32, 1), 0), reinterpret_tensor(buf331, (4, 32), ( 32, 1), 0), buf332, buf336, buf337, buf338, reinterpret_tensor( buf339, (4, 32), (32, 1), 0), reinterpret_tensor(buf342, (4, 32), ( 32, 1), 0), buf343, buf347, buf348, buf350, reinterpret_tensor( buf351, (4, 32), (32, 1), 0), reinterpret_tensor(buf354, (4, 32), ( 32, 1), 0), buf355, buf359, buf360, buf361, reinterpret_tensor( buf362, (4, 32), (32, 1), 0), reinterpret_tensor(buf365, (4, 32), ( 32, 1), 0), buf366, buf370, buf371, buf372, reinterpret_tensor( buf373, (4, 32), (32, 1), 0), reinterpret_tensor(buf376, (4, 32), ( 32, 1), 0), buf377, buf381, buf382, buf384, reinterpret_tensor( buf385, (4, 32), (32, 1), 0), reinterpret_tensor(buf388, (4, 32), ( 32, 1), 0), buf389, buf393, buf394, buf395, reinterpret_tensor( buf396, (4, 32), (32, 1), 0), reinterpret_tensor(buf399, (4, 32), ( 32, 1), 0), buf400, buf404, buf405, buf406, reinterpret_tensor( buf407, (4, 32), (32, 1), 0), reinterpret_tensor(buf410, (4, 32), ( 32, 1), 0), buf411, buf415, buf416, buf418, reinterpret_tensor( buf419, (4, 32), (32, 1), 0), reinterpret_tensor(buf422, (4, 32), ( 32, 1), 0), buf423, buf427, buf428, buf429, reinterpret_tensor( buf430, (4, 32), (32, 1), 0), reinterpret_tensor(buf433, (4, 32), ( 32, 1), 0), buf434, buf438, buf439, buf440, reinterpret_tensor( buf441, (4, 32), (32, 1), 0), reinterpret_tensor(buf444, (4, 32), ( 32, 1), 0), buf445, buf449, buf450, 
# NOTE(review): the lines below continue the return tuple of the generated
# inductor `call(...)` function, whose `def` begins before this chunk.  The
# buffer list (intermediate activations and group-norm statistics saved for
# the backward pass) is preserved token-for-token.
buf452, reinterpret_tensor(buf453, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf456, (4, 32), (32, 1), 0), buf457, buf461,
        buf462, buf467, buf468, buf469,
        reinterpret_tensor(buf470, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf473, (4, 32), (32, 1), 0), buf474, buf478,
        buf479, buf480,
        reinterpret_tensor(buf481, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf484, (4, 32), (32, 1), 0), buf485, buf489,
        buf490, buf492,
        reinterpret_tensor(buf493, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf496, (4, 32), (32, 1), 0), buf497, buf501,
        buf502, buf503,
        reinterpret_tensor(buf504, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf507, (4, 32), (32, 1), 0), buf508, buf512,
        buf513, buf514,
        reinterpret_tensor(buf515, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf518, (4, 32), (32, 1), 0), buf519, buf523,
        buf524, buf526,
        reinterpret_tensor(buf527, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf530, (4, 32), (32, 1), 0), buf531, buf535,
        buf536, buf537,
        reinterpret_tensor(buf538, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf541, (4, 32), (32, 1), 0), buf542, buf546,
        buf547, buf548,
        reinterpret_tensor(buf549, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf552, (4, 32), (32, 1), 0), buf553, buf557,
        buf558, buf560,
        reinterpret_tensor(buf561, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf564, (4, 32), (32, 1), 0), buf565, buf569,
        buf570, buf571,
        reinterpret_tensor(buf572, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf575, (4, 32), (32, 1), 0), buf576, buf580,
        buf581, buf582,
        reinterpret_tensor(buf583, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf586, (4, 32), (32, 1), 0), buf587, buf591,
        buf592, buf594, buf595, buf598, buf599)


def conv1x1(cin, cout, stride=1, bias=False):
    """Build a 1x1 weight-standardized convolution (see StdConv2d)."""
    return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
        bias=bias)


def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    """Build a 3x3 weight-standardized convolution with padding 1."""
    return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
        bias=bias, groups=groups)


def tf2th(conv_weights):
    """Possibly convert HWIO to OIHW"""
    # 4-D kernels arrive in TensorFlow's HWIO layout; transpose to the
    # OIHW layout PyTorch convolutions expect.  Non-4-D tensors (e.g.
    # group-norm gamma/beta vectors) pass through unchanged.
    if conv_weights.ndim == 4:
        conv_weights = np.transpose(conv_weights, [3, 2, 0, 1])
    return torch.from_numpy(conv_weights)


class StdConv2d(nn.Conv2d):
    """Conv2d with weight standardization: each output filter's kernel is
    normalized to zero mean / unit (biased) variance over dims [1, 2, 3]
    before the convolution is applied."""

    def forward(self, x):
        w = self.weight
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        # 1e-10 guards against division by zero for all-constant filters.
        w = (w - m) / torch.sqrt(v + 1e-10)
        return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
            dilation, self.groups)


class PreActBottleneck(nn.Module):
    """
    Follows the implementation of "Identity Mappings in Deep Residual Networks" here:
    https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua

    Except it puts the stride on 3x3 conv when available.
    """

    def __init__(self, cin, cout=None, cmid=None, stride=1):
        super().__init__()
        cout = cout or cin
        cmid = cmid or cout // 4
        # Pre-activation ordering: each conv is preceded by GroupNorm+ReLU.
        self.gn1 = nn.GroupNorm(32, cin)
        self.conv1 = conv1x1(cin, cmid)
        self.gn2 = nn.GroupNorm(32, cmid)
        self.conv2 = conv3x3(cmid, cmid, stride)  # stride lives on the 3x3
        self.gn3 = nn.GroupNorm(32, cmid)
        self.conv3 = conv1x1(cmid, cout)
        self.relu = nn.ReLU(inplace=True)
        # Projection shortcut only when the spatial size or channel count
        # changes; otherwise the residual is the identity.
        if stride != 1 or cin != cout:
            self.downsample = conv1x1(cin, cout, stride)

    def forward(self, x):
        # Shared pre-activation feeds both the residual projection (if any)
        # and the main branch.
        out = self.relu(self.gn1(x))
        residual = x
        if hasattr(self, 'downsample'):
            residual = self.downsample(out)
        out = self.conv1(out)
        out = self.conv2(self.relu(self.gn2(out)))
        out = self.conv3(self.relu(self.gn3(out)))
        return out + residual

    def load_from(self, weights, prefix=''):
        """Copy this unit's parameters from a BiT TensorFlow weight dict.

        Keys follow the BiT naming scheme (a/b/c = conv1/conv2/conv3);
        kernels are converted HWIO -> OIHW via tf2th.  Returns self.
        """
        with torch.no_grad():
            self.conv1.weight.copy_(tf2th(weights[prefix +
                'a/standardized_conv2d/kernel']))
            self.conv2.weight.copy_(tf2th(weights[prefix +
                'b/standardized_conv2d/kernel']))
            self.conv3.weight.copy_(tf2th(weights[prefix +
                'c/standardized_conv2d/kernel']))
            self.gn1.weight.copy_(tf2th(weights[prefix + 'a/group_norm/gamma'])
                )
            self.gn2.weight.copy_(tf2th(weights[prefix + 'b/group_norm/gamma'])
                )
            self.gn3.weight.copy_(tf2th(weights[prefix + 'c/group_norm/gamma'])
                )
            self.gn1.bias.copy_(tf2th(weights[prefix + 'a/group_norm/beta']))
            self.gn2.bias.copy_(tf2th(weights[prefix + 'b/group_norm/beta']))
            self.gn3.bias.copy_(tf2th(weights[prefix + 'c/group_norm/beta']))
            if hasattr(self, 'downsample'):
                self.downsample.weight.copy_(tf2th(weights[prefix +
                    'a/proj/standardized_conv2d/kernel']))
        return self


# NOTE(review): the class below is cut off at the edge of this chunk
# (mid-way through the head Sequential); its remaining tokens continue on
# the following line of the file and are left untouched.
class ResNetV2New(nn.Module):
    # Units per stage for the standard BiT / ResNetV2 depths.
    BLOCK_UNITS = {'r50': [3, 4, 6, 3], 'r101': [3, 4, 23, 3], 'r152': [3,
        8, 36, 3]}

    def __init__(self, block_units, width_factor, head_size=21843,
        zero_head=False):
        super().__init__()
        wf = width_factor
        # Stem: 7x7/2 weight-standardized conv, explicit pad, 3x3/2 max-pool.
        self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, 64 *
            wf, kernel_size=7, stride=2, padding=3, bias=False)), ('padp',
            nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3,
            stride=2, padding=0))]))
        # Four stages of pre-activation bottlenecks; stages 2-4 downsample
        # with stride-2 in their first unit.
        self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
            OrderedDict([('unit01', PreActBottleneck(cin=64 * wf, cout=256 *
            wf, cmid=64 * wf))] + [(f'unit{i:02d}', PreActBottleneck(cin=
            256 * wf, cout=256 * wf, cmid=64 * wf)) for i in range(2,
            block_units[0] + 1)]))), ('block2', nn.Sequential(OrderedDict([
            ('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid=
            128 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=
            512 * wf, cout=512 * wf, cmid=128 * wf)) for i in range(2,
            block_units[1] + 1)]))), ('block3', nn.Sequential(OrderedDict([
            ('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid=
            256 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=
            1024 * wf, cout=1024 * wf, cmid=256 * wf)) for i in range(2,
            block_units[2] + 1)]))), ('block4', nn.Sequential(OrderedDict([
            ('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid
            =512 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin
            =2048 * wf, cout=2048 * wf, cmid=512 * wf)) for i in range(2,
            block_units[3] + 1)])))]))
        self.zero_head = zero_head
        # Classification head: GroupNorm + ReLU + global average pool + 1x1
        # conv (expression continues on the next file line).
        self.head = nn.Sequential(OrderedDict([('gn', nn.GroupNorm(32, 
            2048 * wf)), ('relu', nn.ReLU(inplace=True)), ('avg', nn.
AdaptiveAvgPool2d(output_size=1)), ('conv', nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True))])) def load_from(self, weights, prefix='resnet/'): with torch.no_grad(): self.root.conv.weight.copy_(tf2th(weights[ f'{prefix}root_block/standardized_conv2d/kernel'])) self.head.gn.weight.copy_(tf2th(weights[ f'{prefix}group_norm/gamma'])) self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta'])) if self.zero_head: nn.init.zeros_(self.head.conv.weight) nn.init.zeros_(self.head.conv.bias) else: self.head.conv.weight.copy_(tf2th(weights[ f'{prefix}head/conv2d/kernel'])) self.head.conv.bias.copy_(tf2th(weights[ f'{prefix}head/conv2d/bias'])) for bname, block in self.body.named_children(): for uname, unit in block.named_children(): unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/') return self def forward(self, input_0): primals_1 = self.root.conv.weight primals_3 = self.body.block1.unit01.gn1.weight primals_4 = self.body.block1.unit01.gn1.bias primals_6 = self.body.block1.unit01.conv1.weight primals_7 = self.body.block1.unit01.gn2.weight primals_8 = self.body.block1.unit01.gn2.bias primals_9 = self.body.block1.unit01.conv2.weight primals_10 = self.body.block1.unit01.gn3.weight primals_11 = self.body.block1.unit01.gn3.bias primals_5 = self.body.block1.unit01.conv3.weight primals_12 = self.body.block1.unit01.downsample.weight primals_13 = self.body.block1.unit02.gn1.weight primals_14 = self.body.block1.unit02.gn1.bias primals_15 = self.body.block1.unit02.conv1.weight primals_16 = self.body.block1.unit02.gn2.weight primals_17 = self.body.block1.unit02.gn2.bias primals_18 = self.body.block1.unit02.conv2.weight primals_19 = self.body.block1.unit02.gn3.weight primals_20 = self.body.block1.unit02.gn3.bias primals_21 = self.body.block1.unit02.conv3.weight primals_22 = self.body.block1.unit03.gn1.weight primals_23 = self.body.block1.unit03.gn1.bias primals_24 = self.body.block1.unit03.conv1.weight primals_25 = self.body.block1.unit03.gn2.weight primals_26 = 
self.body.block1.unit03.gn2.bias primals_27 = self.body.block1.unit03.conv2.weight primals_28 = self.body.block1.unit03.gn3.weight primals_29 = self.body.block1.unit03.gn3.bias primals_30 = self.body.block1.unit03.conv3.weight primals_31 = self.body.block1.unit04.gn1.weight primals_32 = self.body.block1.unit04.gn1.bias primals_33 = self.body.block1.unit04.conv1.weight primals_34 = self.body.block1.unit04.gn2.weight primals_35 = self.body.block1.unit04.gn2.bias primals_36 = self.body.block1.unit04.conv2.weight primals_37 = self.body.block1.unit04.gn3.weight primals_38 = self.body.block1.unit04.gn3.bias primals_39 = self.body.block1.unit04.conv3.weight primals_40 = self.body.block2.unit01.gn1.weight primals_41 = self.body.block2.unit01.gn1.bias primals_43 = self.body.block2.unit01.conv1.weight primals_44 = self.body.block2.unit01.gn2.weight primals_45 = self.body.block2.unit01.gn2.bias primals_46 = self.body.block2.unit01.conv2.weight primals_47 = self.body.block2.unit01.gn3.weight primals_48 = self.body.block2.unit01.gn3.bias primals_49 = self.body.block2.unit01.conv3.weight primals_42 = self.body.block2.unit01.downsample.weight primals_50 = self.body.block2.unit02.gn1.weight primals_51 = self.body.block2.unit02.gn1.bias primals_52 = self.body.block2.unit02.conv1.weight primals_53 = self.body.block2.unit02.gn2.weight primals_54 = self.body.block2.unit02.gn2.bias primals_55 = self.body.block2.unit02.conv2.weight primals_56 = self.body.block2.unit02.gn3.weight primals_57 = self.body.block2.unit02.gn3.bias primals_58 = self.body.block2.unit02.conv3.weight primals_59 = self.body.block2.unit03.gn1.weight primals_60 = self.body.block2.unit03.gn1.bias primals_61 = self.body.block2.unit03.conv1.weight primals_62 = self.body.block2.unit03.gn2.weight primals_63 = self.body.block2.unit03.gn2.bias primals_64 = self.body.block2.unit03.conv2.weight primals_65 = self.body.block2.unit03.gn3.weight primals_66 = self.body.block2.unit03.gn3.bias primals_67 = 
self.body.block2.unit03.conv3.weight primals_68 = self.body.block2.unit04.gn1.weight primals_69 = self.body.block2.unit04.gn1.bias primals_70 = self.body.block2.unit04.conv1.weight primals_71 = self.body.block2.unit04.gn2.weight primals_72 = self.body.block2.unit04.gn2.bias primals_73 = self.body.block2.unit04.conv2.weight primals_74 = self.body.block2.unit04.gn3.weight primals_75 = self.body.block2.unit04.gn3.bias primals_76 = self.body.block2.unit04.conv3.weight primals_77 = self.body.block3.unit01.gn1.weight primals_78 = self.body.block3.unit01.gn1.bias primals_80 = self.body.block3.unit01.conv1.weight primals_81 = self.body.block3.unit01.gn2.weight primals_82 = self.body.block3.unit01.gn2.bias primals_83 = self.body.block3.unit01.conv2.weight primals_84 = self.body.block3.unit01.gn3.weight primals_85 = self.body.block3.unit01.gn3.bias primals_86 = self.body.block3.unit01.conv3.weight primals_79 = self.body.block3.unit01.downsample.weight primals_87 = self.body.block3.unit02.gn1.weight primals_88 = self.body.block3.unit02.gn1.bias primals_89 = self.body.block3.unit02.conv1.weight primals_90 = self.body.block3.unit02.gn2.weight primals_91 = self.body.block3.unit02.gn2.bias primals_92 = self.body.block3.unit02.conv2.weight primals_93 = self.body.block3.unit02.gn3.weight primals_94 = self.body.block3.unit02.gn3.bias primals_95 = self.body.block3.unit02.conv3.weight primals_96 = self.body.block3.unit03.gn1.weight primals_97 = self.body.block3.unit03.gn1.bias primals_98 = self.body.block3.unit03.conv1.weight primals_99 = self.body.block3.unit03.gn2.weight primals_100 = self.body.block3.unit03.gn2.bias primals_101 = self.body.block3.unit03.conv2.weight primals_102 = self.body.block3.unit03.gn3.weight primals_103 = self.body.block3.unit03.gn3.bias primals_104 = self.body.block3.unit03.conv3.weight primals_105 = self.body.block3.unit04.gn1.weight primals_106 = self.body.block3.unit04.gn1.bias primals_107 = self.body.block3.unit04.conv1.weight primals_108 = 
self.body.block3.unit04.gn2.weight primals_109 = self.body.block3.unit04.gn2.bias primals_110 = self.body.block3.unit04.conv2.weight primals_111 = self.body.block3.unit04.gn3.weight primals_112 = self.body.block3.unit04.gn3.bias primals_113 = self.body.block3.unit04.conv3.weight primals_114 = self.body.block4.unit01.gn1.weight primals_115 = self.body.block4.unit01.gn1.bias primals_117 = self.body.block4.unit01.conv1.weight primals_118 = self.body.block4.unit01.gn2.weight primals_119 = self.body.block4.unit01.gn2.bias primals_120 = self.body.block4.unit01.conv2.weight primals_121 = self.body.block4.unit01.gn3.weight primals_122 = self.body.block4.unit01.gn3.bias primals_123 = self.body.block4.unit01.conv3.weight primals_116 = self.body.block4.unit01.downsample.weight primals_124 = self.body.block4.unit02.gn1.weight primals_125 = self.body.block4.unit02.gn1.bias primals_126 = self.body.block4.unit02.conv1.weight primals_127 = self.body.block4.unit02.gn2.weight primals_128 = self.body.block4.unit02.gn2.bias primals_129 = self.body.block4.unit02.conv2.weight primals_130 = self.body.block4.unit02.gn3.weight primals_131 = self.body.block4.unit02.gn3.bias primals_132 = self.body.block4.unit02.conv3.weight primals_133 = self.body.block4.unit03.gn1.weight primals_134 = self.body.block4.unit03.gn1.bias primals_135 = self.body.block4.unit03.conv1.weight primals_136 = self.body.block4.unit03.gn2.weight primals_137 = self.body.block4.unit03.gn2.bias primals_138 = self.body.block4.unit03.conv2.weight primals_139 = self.body.block4.unit03.gn3.weight primals_140 = self.body.block4.unit03.gn3.bias primals_141 = self.body.block4.unit03.conv3.weight primals_142 = self.body.block4.unit04.gn1.weight primals_143 = self.body.block4.unit04.gn1.bias primals_144 = self.body.block4.unit04.conv1.weight primals_145 = self.body.block4.unit04.gn2.weight primals_146 = self.body.block4.unit04.gn2.bias primals_147 = self.body.block4.unit04.conv2.weight primals_148 = 
self.body.block4.unit04.gn3.weight primals_149 = self.body.block4.unit04.gn3.bias primals_150 = self.body.block4.unit04.conv3.weight primals_151 = self.head.gn.weight primals_152 = self.head.gn.bias primals_153 = self.head.conv.weight primals_154 = self.head.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, 
primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154]) return output[0]
marekb-sci/kaggle_cassava
ResNetV2
false
13,235
[ "Apache-2.0" ]
0
158d1e398e713381c889e071329b96b9c0ba98d2
https://github.com/marekb-sci/kaggle_cassava/tree/158d1e398e713381c889e071329b96b9c0ba98d2
Model
import torch
import torch.nn.functional
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed
from torch.nn import Module
from torch.nn.parameter import Parameter


class Model(Module):
    """Element-wise scaling module: ``out = input * a * b``.

    ``a`` and ``b`` are flat learnable vectors of 4096 * 4096 = 16,777,216
    elements, initialized to 1.0 and 2.0 respectively. The input must
    broadcast against that flat shape (e.g. a trailing dimension of
    16777216, or a scalar/one-element tensor).

    Note: the original cell imported ``Module`` three times and
    ``Parameter`` twice (shadowing, dead bindings); the duplicates are
    removed here while keeping the final bindings identical
    (``torch.nn.Module`` and ``torch.nn.parameter.Parameter``).
    """

    def __init__(self):
        super(Model, self).__init__()
        self.a = Parameter(torch.FloatTensor(4096 * 4096).fill_(1.0))
        self.b = Parameter(torch.FloatTensor(4096 * 4096).fill_(2.0))

    def forward(self, input):
        # Two broadcasted element-wise multiplies; with the initial
        # parameter values this simply doubles the input.
        return input * self.a * self.b


def get_inputs():
    """Sample forward() inputs: one (4, 4, 4, 16777216) random tensor (~1 GiB)."""
    return [torch.rand([4, 4, 4, 16777216])]


def get_init_inputs():
    """Sample constructor arguments: no positional args, no kwargs."""
    return [[], {}]
# TorchInductor-style compiled counterpart of `Model` (input * a * b): the two
# broadcasted multiplies are fused into one Triton kernel. CUDA-only — buffers
# come from empty_strided_cuda and shapes are hard-coded to the sample input
# (4, 4, 4, 16777216) with flat parameter vectors of 4096*4096 elements.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn.functional
from torch.nn import Parameter
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed
from torch.nn import Module
# Inductor guard helpers: shape/stride assertion and strided CUDA allocation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Pointwise fused kernel: out[x] = in0[x] * in1[x % N] * in2[x % N],
    # i.e. the input broadcast-multiplied by both flat parameter vectors.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    # x0 folds the flat output index back onto the 16777216-long parameters.
    x0 = xindex % 16777216
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x2, tmp4, None)


def call(args):
    # args = [a, input, b]; shapes are asserted, then the fused kernel is
    # launched over all 4*4*4*16777216 = 1073741824 output elements.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (16777216,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 16777216), (268435456,
        67108864, 16777216, 1))
    assert_size_stride(primals_3, (16777216,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 16777216), (268435456,
            67108864, 16777216, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(1073741824)](primals_2, primals_1,
            primals_3, buf0, 1073741824, XBLOCK=1024, num_warps=4,
            num_stages=1)
    # Inputs are returned alongside the result (kept alive for backward).
    return buf0, primals_1, primals_2, primals_3


class ModelNew(Module):
    # Same interface/state as `Model`; forward dispatches to the fused kernel.
    def __init__(self):
        super(ModelNew, self).__init__()
        self.a = Parameter(torch.FloatTensor(4096 * 4096).fill_(1.0))
        self.b = Parameter(torch.FloatTensor(4096 * 4096).fill_(2.0))

    def forward(self, input_0):
        primals_1 = self.a
        primals_3 = self.b
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
DominickZhang/Distillation-Swin-Transformer
Model
false
13,236
[ "MIT" ]
0
6fc7b25bd558edb14e6f15715f53612c37e5166f
https://github.com/DominickZhang/Distillation-Swin-Transformer/tree/6fc7b25bd558edb14e6f15715f53612c37e5166f
L2Norm
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init


class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale.

    Each spatial position of an (N, C, H, W) tensor is normalized to unit
    L2 norm across the channel dimension, then every channel is rescaled
    by a learned weight initialized to ``scale`` (SSD-style).
    """

    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        # NOTE: a falsy scale (0 / 0.0) becomes None, which makes
        # reset_parameters fail -- preserved from the reference behavior.
        self.gamma = scale if scale else None
        self.eps = 1e-10  # guards against division by zero
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        """Fill every channel weight with the constant ``gamma``."""
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        """Normalize ``x`` across dim 1 and apply the per-channel scale."""
        denom = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        normalized = x / denom
        scale = self.weight.view(1, self.n_channels, 1, 1)
        return scale.expand_as(normalized) * normalized


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_channels': 4, 'scale': 1.0}]
# TorchInductor-style compiled counterpart of `L2Norm`: the pow/sum/sqrt
# channel norm, the division and the per-channel scale are fused into one
# Triton kernel. CUDA-only; shapes hard-coded to the (4, 4, 4, 4) sample.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
# Inductor guard helpers: shape/stride assertion and strided CUDA allocation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # in_ptr0 = per-channel weight (4,), in_ptr1 = input (4, 4, 4, 4).
    # The 4-term channel sum of squares is unrolled (offsets 0/16/32/48).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4  # channel index
    x3 = xindex
    x0 = xindex % 16  # spatial offset within one channel plane
    x2 = xindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = libdevice.sqrt(tmp12)
    tmp14 = 1e-10  # matches L2Norm.eps
    tmp15 = tmp13 + tmp14
    tmp16 = tmp1 / tmp15
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    # args = [input, weight]; one fused kernel launch over all 256 elements.
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
            primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf0, primals_1


class L2NormNew(nn.Module):
    # Same parameters/initialization as `L2Norm`; forward uses the kernel.
    def __init__(self, n_channels, scale):
        super(L2NormNew, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
Abraham-Xu/TF2
L2Norm
false
13,237
[ "Apache-2.0" ]
144
a5bc18acb7743dc5b6e85cfbefa8b88c3785ce78
https://github.com/Abraham-Xu/TF2/tree/a5bc18acb7743dc5b6e85cfbefa8b88c3785ce78
ToTensor
from torch.nn import Module
import torch


class ToTensor(Module):
    """Rescale pixel values from the [0, 255] range down to [0, 1].

    Only the division by 255 is performed here; no dtype or layout
    conversion takes place.
    """

    def __init__(self):
        super(ToTensor, self).__init__()

    def forward(self, x):
        # Plain tensor/scalar division.
        return x / 255


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
# TorchInductor-style compiled counterpart of `ToTensor` (x / 255): the
# division is emitted as a multiply by the reciprocal 1/255. CUDA-only;
# shapes hard-coded to the (4, 4, 4, 4) sample input.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
# Inductor guard helpers: shape/stride assertion and strided CUDA allocation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Pointwise: out[x] = in[x] * (1/255).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.00392156862745098  # 1 / 255
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ToTensorNew(Module):
    def __init__(self):
        super(ToTensorNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
AlexMontgomerie/finn
ToTensor
false
13,238
[ "BSD-3-Clause" ]
283
ec5f67b333ad4db4acf6191c3b5ab5e9067347aa
https://github.com/AlexMontgomerie/finn/tree/ec5f67b333ad4db4acf6191c3b5ab5e9067347aa
ELUPlus
import torch
import torch.nn as nn
import torch.utils.data


class ELUPlus(nn.Module):
    """ELU shifted up by one: ``f(x) = ELU(x) + 1``.

    Since ELU ranges over (-1, inf), the output is strictly positive,
    making this a common positivity-enforcing activation.
    """

    def __init__(self):
        super().__init__()
        self.elu = nn.ELU()

    def forward(self, x):
        activated = self.elu(x)
        return activated + 1.0


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
# TorchInductor-style compiled counterpart of `ELUPlus` (ELU(x) + 1): the ELU
# branch (x if x > 0 else expm1(x)) and the +1 shift are fused into one
# Triton kernel. CUDA-only; shapes hard-coded to the (4, 4, 4, 4) sample.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
# Inductor guard helpers: shape/stride assertion and strided CUDA allocation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1  # positive branch selector
    tmp3 = 1.0  # ELU alpha (and the +1 shift below)
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)  # exp(x) - 1 for the negative branch
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tmp8 = tmp7 + tmp3
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_elu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ELUPlusNew(nn.Module):
    # self.elu is kept for state parity with `ELUPlus`; the kernel does the work.
    def __init__(self):
        super().__init__()
        self.elu = nn.ELU()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
AWehenkel/UMNN
ELUPlus
false
13,239
[ "BSD-3-Clause" ]
69
f93cb36040783dd60e14e0eda927899d3919825c
https://github.com/AWehenkel/UMNN/tree/f93cb36040783dd60e14e0eda927899d3919825c
tofp16
import torch
import torch.nn as nn


class tofp16(nn.Module):
    """Cast the incoming tensor to half precision (float16)."""

    def __init__(self):
        super(tofp16, self).__init__()

    def forward(self, input):
        # Equivalent to input.half(): a pure dtype cast.
        return input.to(torch.float16)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
# TorchInductor-style compiled counterpart of `tofp16` (input.half()): a
# pointwise copy whose fp16 conversion happens on store into the float16
# output buffer. CUDA-only; shapes hard-coded to the (4, 4, 4, 4) sample.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
# Inductor guard helpers: shape/stride assertion and strided CUDA allocation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    # Value stays fp32 in registers; the store into the fp16 buffer below
    # performs the precision narrowing.
    tmp1 = tmp0.to(tl.float32)
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16)
        get_raw_stream(0)
        triton_poi_fused__to_copy_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class tofp16New(nn.Module):
    def __init__(self):
        super(tofp16New, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
AnonymousAuthors444/VEC_VAD
tofp16
false
13,240
[ "MIT" ]
67
0072bf857030e621e2f9c12689407b81e45ed603
https://github.com/AnonymousAuthors444/VEC_VAD/tree/0072bf857030e621e2f9c12689407b81e45ed603
AffineChannel2d
import torch
import torch.nn as nn
import torch.utils.data


class AffineChannel2d(nn.Module):
    """
    A simple channel-wise affine transformation operation
    """

    def __init__(self, num_features):
        super().__init__()
        self.num_features = num_features
        self.weight = nn.Parameter(torch.Tensor(num_features))
        self.bias = nn.Parameter(torch.Tensor(num_features))
        # weight ~ U(0, 1), bias = 0 -- same initialization as the reference.
        self.weight.data.uniform_()
        self.bias.data.zero_()

    def forward(self, x):
        # Broadcast the per-channel parameters over (N, C, H, W).
        w = self.weight.reshape(1, self.num_features, 1, 1)
        b = self.bias.reshape(1, self.num_features, 1, 1)
        return x * w + b
# TorchInductor-style compiled counterpart of `AffineChannel2d`
# (x * w[c] + b[c]): multiply and add are fused into one Triton kernel.
# CUDA-only; shapes hard-coded to the (4, 4, 4, 4) sample input.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
# Inductor guard helpers: shape/stride assertion and strided CUDA allocation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # in_ptr0 = input, in_ptr1 = weight (4,), in_ptr2 = bias (4,).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index for the per-channel params
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tl.store(out_ptr0 + x3, tmp4, xmask)


def call(args):
    # args = [weight, input, bias]; single fused launch over 256 elements.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1,
            primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_3
    return buf0, primals_2


class AffineChannel2dNew(nn.Module):
    """
    A simple channel-wise affine transformation operation
    """

    def __init__(self, num_features):
        super().__init__()
        self.num_features = num_features
        self.weight = nn.Parameter(torch.Tensor(num_features))
        self.bias = nn.Parameter(torch.Tensor(num_features))
        self.weight.data.uniform_()
        self.bias.data.zero_()

    def forward(self, input_0):
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
AmorosTech/RP-R-CNN
AffineChannel2d
false
13,241
[ "MIT" ]
78
45557a69ae9789e2662e3b937feb7624319a3e73
https://github.com/AmorosTech/RP-R-CNN/tree/45557a69ae9789e2662e3b937feb7624319a3e73
RankCrossEntropyLoss
import torch
import torch.nn as nn
import torch.nn.functional as F


class RankCrossEntropyLoss(nn.Module):
    """Creates a criterion that measures rank cross entropy loss."""
    __constants__ = ['num_neg']

    def __init__(self, num_neg: 'int'=1):
        """
        :class:`RankCrossEntropyLoss` constructor.

        :param num_neg: Number of negative instances in hinge loss.
        """
        super().__init__()
        self.num_neg = num_neg

    def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'):
        """
        Calculate rank cross entropy loss.

        :param y_pred: Predicted result.
        :param y_true: Label.
        :return: Rank cross loss.
        """
        step = self.num_neg + 1
        # Rows 0, step, 2*step, ... are the positives; the rows in between
        # are their negatives. Gather them side by side along the last dim.
        logit_parts = [y_pred[::step, :]]
        label_parts = [y_true[::step, :]]
        for neg_idx in range(self.num_neg):
            logit_parts.append(y_pred[neg_idx + 1::step, :])
            label_parts.append(y_true[neg_idx + 1::step, :])
        logits = torch.cat(logit_parts, dim=-1)
        labels = torch.cat(label_parts, dim=-1)
        # eps keeps log() finite when a softmax probability underflows to 0.
        smoothed = F.softmax(logits, dim=-1) + torch.finfo(float).eps
        return -torch.mean(torch.sum(labels * torch.log(smoothed), dim=-1))

    @property
    def num_neg(self):
        """`num_neg` getter."""
        return self._num_neg

    @num_neg.setter
    def num_neg(self, value):
        """`num_neg` setter."""
        self._num_neg = value


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
# TorchInductor-style compiled counterpart of `RankCrossEntropyLoss` for
# num_neg=1 and the fixed (4, 4, 4, 4) sample inputs. Kernel 0 fuses the
# positive/negative concat, the softmax over the 8-wide logits, the +eps,
# log, label multiply and the last-dim sum; kernel 1 takes the mean and
# negates it. CUDA-only.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
# Inductor guard helpers: assertions, strided CUDA allocation, stride reinterpretation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused__softmax_add_cat_log_mul_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Per-row reduction: x spans the 32 output rows, r the 8 concatenated
    # logits (4 from the positive slice, 4 from the negative slice).
    xnumel = 32
    RBLOCK: tl.constexpr = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 16
    x1 = xindex // 16
    x3 = xindex
    tmp0 = r2
    tl.full([1, 1], 0, tl.int64)
    tmp3 = tl.full([1, 1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # r < 4 -> read from the positive (even) rows
    tmp5 = tl.load(in_ptr0 + (4 * x0 + 128 * x1 + r2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3  # r >= 4 -> read from the negative (odd) rows
    tl.full([1, 1], 8, tl.int64)
    tmp9 = tl.load(in_ptr0 + (64 + 4 * x0 + 128 * x1 + (-4 + r2)), tmp6 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    # Numerically stable softmax: subtract the row max before exp.
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, float('-inf'))
    tmp14 = triton_helpers.max2(tmp13, 1)[:, None]
    tmp15 = tmp10 - tmp14
    tmp16 = tl_math.exp(tmp15)
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.where(xmask, tmp17, 0)
    tmp20 = tl.sum(tmp19, 1)[:, None]
    # Same concat pattern for the labels.
    tmp21 = tl.load(in_ptr1 + (4 * x0 + 128 * x1 + r2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp22 = tl.load(in_ptr1 + (64 + 4 * x0 + 128 * x1 + (-4 + r2)), tmp6 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp23 = tl.where(tmp4, tmp21, tmp22)
    tmp24 = tmp16 / tmp20
    tmp25 = 2.220446049250313e-16  # torch.finfo(float).eps from the reference
    tmp26 = tmp24 + tmp25
    tmp27 = tl_math.log(tmp26)
    tmp28 = tmp23 * tmp27
    tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
    tmp31 = tl.where(xmask, tmp29, 0)
    tmp32 = tl.sum(tmp31, 1)[:, None]
    tl.store(in_out_ptr0 + x3, tmp32, xmask)


@triton.jit
def triton_per_fused_mean_neg_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    # Final scalar: -(mean of the 32 per-row sums).
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp4 = 32.0  # number of reduced rows
    tmp5 = tmp3 / tmp4
    tmp6 = -tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)


def call(args):
    # args = [y_pred, y_true]; two kernel launches produce the scalar loss.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((2, 4, 4, 1), (16, 4, 1, 32), torch.float32)
        buf2 = reinterpret_tensor(buf1, (2, 4, 4), (16, 4, 1), 0)
        del buf1
        get_raw_stream(0)
        triton_per_fused__softmax_add_cat_log_mul_sum_0[grid(32)](buf2,
            arg0_1, arg1_1, 32, 8, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_per_fused_mean_neg_1[grid(1)](buf4, buf2, 1, 32, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf2
    return buf4,


class RankCrossEntropyLossNew(nn.Module):
    """Creates a criterion that measures rank cross entropy loss."""
    __constants__ = ['num_neg']

    def __init__(self, num_neg: 'int'=1):
        """
        :class:`RankCrossEntropyLoss` constructor.

        :param num_neg: Number of negative instances in hinge loss.
        """
        super().__init__()
        self.num_neg = num_neg

    @property
    def num_neg(self):
        """`num_neg` getter."""
        return self._num_neg

    @num_neg.setter
    def num_neg(self, value):
        """`num_neg` setter."""
        self._num_neg = value

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Ambitioner-c/MatchZoo-py
RankCrossEntropyLoss
false
13,242
[ "Apache-2.0" ]
468
bb088edce8e01c2c2326ca1a8ac647f0d23f088d
https://github.com/Ambitioner-c/MatchZoo-py/tree/bb088edce8e01c2c2326ca1a8ac647f0d23f088d
Upsample
import torch
import torch.nn as nn


class Upsample(nn.Module):
    """Nearest-neighbor upsampling by an integer ``stride`` factor.

    Every pixel of an (B, C, H, W) input is replicated into a
    stride x stride block, producing (B, C, H*stride, W*stride).
    """

    def __init__(self, stride=2):
        super(Upsample, self).__init__()
        self.stride = stride

    def forward(self, x):
        assert x.data.dim() == 4
        s = self.stride
        b, c, h, w = x.size()
        # Insert singleton axes after H and W, expand them to the stride,
        # then fold them back into the spatial dimensions.
        expanded = x.view(b, c, h, 1, w, 1).expand(b, c, h, s, w, s)
        return expanded.contiguous().view(b, c, h * s, w * s)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
# TorchInductor-style compiled counterpart of `Upsample` (stride=2 nearest
# replication): a single gather/clone kernel writes the expanded
# (4, 4, 4, 2, 4, 2) layout, which is then reinterpreted as (4, 4, 8, 8)
# without a copy. CUDA-only; shapes hard-coded to the (4, 4, 4, 4) sample.
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
# Inductor guard helpers: assertions, strided CUDA allocation, stride reinterpretation.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Each of the 1024 output elements reads its source input pixel; the
    # index arithmetic drops the two stride-2 replication axes.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 2 % 4  # source W coordinate (replication axis removed)
    x3 = xindex // 16  # flattened (B, C, H) source index
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + x4, tmp0, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1
            ), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    # Zero-copy view of the 6-D buffer as the (4, 4, 8, 8) result.
    return reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0),


class UpsampleNew(nn.Module):
    def __init__(self, stride=2):
        super(UpsampleNew, self).__init__()
        self.stride = stride

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
AlexRogalskiy/smart-social-distancing
Upsample
false
13,243
[ "Apache-2.0" ]
113
2def6738038035e67ac79fc9b72ba072e190321f
https://github.com/AlexRogalskiy/smart-social-distancing/tree/2def6738038035e67ac79fc9b72ba072e190321f
VocabGraphConvolution
import math
import torch
import torch.nn as nn
import torch.nn.init as init


class VocabGraphConvolution(nn.Module):
    """Vocabulary GCN module.

    Params:
        `voc_dim`: The size of vocabulary graph
        `num_adj`: The number of the adjacency matrix of Vocabulary graph
        `hid_dim`: The hidden dimension after XAW
        `out_dim`: The output dimension after Relu(XAW)W
        `dropout_rate`: The dropout probabilitiy for all fully connected layers
            in the embeddings, encoder, and pooler.

    Inputs:
        `vocab_adj_list`: The list of the adjacency matrix
        `X_dv`: the feature of mini batch document, can be TF-IDF (batch, vocab),
            or word embedding (batch, word_embedding_dim, vocab)

    Outputs:
        The graph embedding representation, dimension (batch, `out_dim`) or
        (batch, word_embedding_dim, `out_dim`)
    """

    def __init__(self, voc_dim, num_adj, hid_dim, out_dim, dropout_rate=0.2):
        super(VocabGraphConvolution, self).__init__()
        self.voc_dim = voc_dim
        self.num_adj = num_adj
        self.hid_dim = hid_dim
        self.out_dim = out_dim
        # One learnable vocab->hidden projection per adjacency matrix.
        for i in range(self.num_adj):
            setattr(self, 'W%d_vh' % i, nn.Parameter(torch.randn(voc_dim,
                hid_dim)))
        self.fc_hc = nn.Linear(hid_dim, out_dim)
        self.act_func = nn.ReLU()
        self.dropout = nn.Dropout(dropout_rate)
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-init only the graph weights (parameter names starting
        # with 'W'/'a'); fc_hc keeps its default Linear initialization.
        for n, p in self.named_parameters():
            if n.startswith('W') or n.startswith('a') or n in ('W', 'a',
                'dense'):
                init.kaiming_uniform_(p, a=math.sqrt(5))

    def forward(self, vocab_adj_list, X_dv, add_linear_mapping_term=False):
        fused_H = None
        for i in range(self.num_adj):
            W_vh = getattr(self, 'W%d_vh' % i)
            # Project the i-th adjacency through its weight, then apply
            # the document features.
            H_vh = self.dropout(vocab_adj_list[i].mm(W_vh))
            H_dh = X_dv.matmul(H_vh)
            if add_linear_mapping_term:
                # Optional skip-style term: X W (without the adjacency).
                H_dh = H_dh + self.dropout(X_dv.matmul(W_vh))
            fused_H = H_dh if fused_H is None else fused_H + H_dh
        return self.fc_hc(fused_H)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'voc_dim': 4, 'num_adj': 4, 'hid_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp5 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, 
(4, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf0, out=buf1) buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf2, out=buf3) buf4 = buf2 del buf2 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), primals_5, out=buf4) del primals_5 buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf4, out=buf5) buf6 = buf4 del buf4 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), primals_6, out=buf6) del primals_6 buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf6, out=buf7) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_add_0[grid(64)](buf8, buf3, buf5, buf7, 64, XBLOCK =64, num_warps=1, num_stages=1) del buf3 del buf5 buf9 = buf7 del buf7 triton_poi_fused_view_1[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 extern_kernels.addmm(primals_8, buf9, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_8 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), buf9, primals_7, reinterpret_tensor(primals_3, (4, 16), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 48 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 32 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 16 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class VocabGraphConvolutionNew(nn.Module): """Vocabulary GCN module. 
Params: `voc_dim`: The size of vocabulary graph `num_adj`: The number of the adjacency matrix of Vocabulary graph `hid_dim`: The hidden dimension after XAW `out_dim`: The output dimension after Relu(XAW)W `dropout_rate`: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. Inputs: `vocab_adj_list`: The list of the adjacency matrix `X_dv`: the feature of mini batch document, can be TF-IDF (batch, vocab), or word embedding (batch, word_embedding_dim, vocab) Outputs: The graph embedding representation, dimension (batch, `out_dim`) or (batch, word_embedding_dim, `out_dim`) """ def __init__(self, voc_dim, num_adj, hid_dim, out_dim, dropout_rate=0.2): super(VocabGraphConvolutionNew, self).__init__() self.voc_dim = voc_dim self.num_adj = num_adj self.hid_dim = hid_dim self.out_dim = out_dim for i in range(self.num_adj): setattr(self, 'W%d_vh' % i, nn.Parameter(torch.randn(voc_dim, hid_dim))) self.fc_hc = nn.Linear(hid_dim, out_dim) self.act_func = nn.ReLU() self.dropout = nn.Dropout(dropout_rate) self.reset_parameters() def reset_parameters(self): for n, p in self.named_parameters(): if n.startswith('W') or n.startswith('a') or n in ('W', 'a', 'dense'): init.kaiming_uniform_(p, a=math.sqrt(5)) def forward(self, input_0, input_1): primals_2 = self.W0_vh primals_4 = self.W1_vh primals_5 = self.W2_vh primals_6 = self.W3_vh primals_7 = self.fc_hc.weight primals_8 = self.fc_hc.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Aksh97/VGCN-BERT
VocabGraphConvolution
false
13,244
[ "MIT" ]
106
62b5ae5a3c53f4bff555027d87a57d3a994a32bb
https://github.com/Aksh97/VGCN-BERT/tree/62b5ae5a3c53f4bff555027d87a57d3a994a32bb
LuongAttention
import torch import torch.nn.functional as F from torch import nn class LuongAttention(nn.Module): """ Luong Attention from Effective Approaches to Attention-based Neural Machine Translation https://arxiv.org/pdf/1508.04025.pdf """ def __init__(self, attention_dim): super(LuongAttention, self).__init__() self.W = nn.Linear(attention_dim, attention_dim, bias=False) def score(self, decoder_hidden, encoder_out): encoder_out = self.W(encoder_out) encoder_out = encoder_out.permute(1, 0, 2) return encoder_out @ decoder_hidden.permute(1, 2, 0) def forward(self, decoder_hidden, encoder_out): energies = self.score(decoder_hidden, encoder_out) mask = F.softmax(energies, dim=1) context = encoder_out.permute(1, 2, 0) @ mask context = context.permute(2, 0, 1) mask = mask.permute(2, 0, 1) return context, mask def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'attention_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, 
eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (4, 16, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (4, 1, 16), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (4, 1, 16), 0), buf3, out=buf4) return reinterpret_tensor(buf4, (4, 4, 4), (1, 16, 4), 0 ), reinterpret_tensor(buf3, (4, 4, 4), (1, 16, 4), 0 ), primals_2, buf3, reinterpret_tensor(primals_3, (4, 4, 4), (4, 16, 1), 0) class LuongAttentionNew(nn.Module): """ Luong Attention from Effective Approaches to Attention-based Neural Machine Translation https://arxiv.org/pdf/1508.04025.pdf """ def __init__(self, attention_dim): super(LuongAttentionNew, self).__init__() self.W = nn.Linear(attention_dim, attention_dim, bias=False) def score(self, decoder_hidden, encoder_out): encoder_out = self.W(encoder_out) encoder_out = encoder_out.permute(1, 0, 2) return encoder_out @ decoder_hidden.permute(1, 2, 0) def forward(self, input_0, input_1): primals_1 = self.W.weight primals_2 = input_0 primals_3 = input_1 
output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
A-Jacobson/minimal-nmt
LuongAttention
false
13,245
[ "MIT" ]
45
dc75e83579a181586acabfa3f22ad269d1e31fbf
https://github.com/A-Jacobson/minimal-nmt/tree/dc75e83579a181586acabfa3f22ad269d1e31fbf
ConvNorm
import torch import torch.utils.data class ConvNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNorm, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. calculate_gain(w_init_gain)) def forward(self, signal): conv_signal = self.conv(signal) return conv_signal def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0) class ConvNormNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNormNew, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, 
padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. calculate_gain(w_init_gain)) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AeroXi/Tacotron2-Mandarin
ConvNorm
false
13,246
[ "MIT" ]
67
b7bc213d1c1a9c3e2f2e11f69f586c2582010668
https://github.com/AeroXi/Tacotron2-Mandarin/tree/b7bc213d1c1a9c3e2f2e11f69f586c2582010668
Actor
import torch import torch.nn as nn import torch.nn.functional as F class Actor(nn.Module): def __init__(self, obs_dim, action_dim): super(Actor, self).__init__() self.obs_dim = obs_dim self.action_dim = action_dim self.linear1 = nn.Linear(self.obs_dim, 512) self.linear2 = nn.Linear(512, 128) self.linear3 = nn.Linear(128, self.action_dim) def forward(self, obs): x = F.relu(self.linear1(obs)) x = F.relu(self.linear2(x)) x = torch.tanh(self.linear3(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'obs_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = 
tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 512), (512, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (4, 128), (128, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1, primals_2, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 128), (1, 512), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3, primals_5, buf6, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 512), (512, 1), 0 ), reinterpret_tensor(buf3, (64, 128), (128, 1), 0 ), buf5, primals_6, buf6, primals_4, buf7 class ActorNew(nn.Module): def __init__(self, obs_dim, action_dim): super(ActorNew, self).__init__() self.obs_dim = obs_dim self.action_dim = action_dim self.linear1 = nn.Linear(self.obs_dim, 512) self.linear2 = nn.Linear(512, 128) self.linear3 = nn.Linear(128, self.action_dim) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
AYUSHKABIRVERMA/Multi-agent-reinforcement-learning
Actor
false
13,247
[ "MIT" ]
62
cd7c13d723cd74dc278939d81d5dd1b0906cee7c
https://github.com/AYUSHKABIRVERMA/Multi-agent-reinforcement-learning/tree/cd7c13d723cd74dc278939d81d5dd1b0906cee7c
ReOrgLayer
import torch import torch.nn as nn class ReOrgLayer(nn.Module): def __init__(self, stride=2): super(ReOrgLayer, self).__init__() self.stride = stride def forward(self, x): assert x.data.dim() == 4 B, C, H, W = x.data.shape hs = self.stride ws = self.stride assert H % hs == 0, 'The stride ' + str(self.stride ) + ' is not a proper divisor of height ' + str(H) assert W % ws == 0, 'The stride ' + str(self.stride ) + ' is not a proper divisor of height ' + str(W) x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(-2, -3 ).contiguous() x = x.view(B, C, H // hs * W // ws, hs, ws) x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(-1, -2 ).contiguous() x = x.view(B, C, ws * hs, H // ws, W // ws).transpose(1, 2).contiguous( ) x = x.view(B, C * ws * hs, H // ws, W // ws) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 2 x3 = xindex // 2 y0 = yindex % 4 y1 = yindex // 4 x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + (2 * x2 + 4 * (y0 // 2) + 8 * x3 + 64 * y1 + y0 % 2), xmask & ymask) tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2, 2), (64, 16, 4, 2, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0), class ReOrgLayerNew(nn.Module): def __init__(self, stride=2): super(ReOrgLayerNew, self).__init__() self.stride = stride def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AlexRogalskiy/smart-social-distancing
ReOrgLayer
false
13,248
[ "Apache-2.0" ]
113
2def6738038035e67ac79fc9b72ba072e190321f
https://github.com/AlexRogalskiy/smart-social-distancing/tree/2def6738038035e67ac79fc9b72ba072e190321f
ConvBlock
import torch import torch.nn as nn class ConvBlock(nn.Module): """ Simple 3x3 conv with padding size 1 (to leave the input size unchanged), followed by a ReLU. """ def __init__(self, input_channels: 'int', output_channels: 'int', kernel_size: 'Param2D'=3, stride: 'Param2D'=1, padding: 'Param2D'=1 ) ->None: super().__init__() self.conv = nn.Conv2d(input_channels, output_channels, kernel_size= kernel_size, stride=stride, padding=padding) self.relu = nn.ReLU() def forward(self, x: 'torch.Tensor') ->torch.Tensor: """ Parameters ---------- x of dimensions (B, C, H, W) Returns ------- torch.Tensor of dimensions (B, C, H, W) """ c = self.conv(x) r = self.relu(c) return r def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4, 'output_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf2 class ConvBlockNew(nn.Module): """ Simple 3x3 conv with padding size 1 (to leave the input size unchanged), followed by a ReLU. 
""" def __init__(self, input_channels: 'int', output_channels: 'int', kernel_size: 'Param2D'=3, stride: 'Param2D'=1, padding: 'Param2D'=1 ) ->None: super().__init__() self.conv = nn.Conv2d(input_channels, output_channels, kernel_size= kernel_size, stride=stride, padding=padding) self.relu = nn.ReLU() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AleksandrLiadov/fsdl-text-recognizer-2021-labs
ConvBlock
false
13,249
[ "MIT" ]
402
9495e1457fc82ab83ff7e4141939d603565eb89b
https://github.com/AleksandrLiadov/fsdl-text-recognizer-2021-labs/tree/9495e1457fc82ab83ff7e4141939d603565eb89b
MeanVoxelFeatureExtractor
import torch import torch.nn as nn class VoxelFeatureExtractor(nn.Module): def __init__(self, **kwargs): super().__init__() def get_output_feature_dim(self): raise NotImplementedError def forward(self, **kwargs): raise NotImplementedError class MeanVoxelFeatureExtractor(VoxelFeatureExtractor): def __init__(self, **kwargs): super().__init__() def get_output_feature_dim(self): return cfg.DATA_CONFIG.NUM_POINT_FEATURES['use'] def forward(self, features, num_voxels, **kwargs): """ :param features: (N, max_points_of_each_voxel, 3 + C) :param num_voxels: (N) :param kwargs: :return: """ points_mean = features[:, :, :].sum(dim=1, keepdim=False ) / num_voxels.type_as(features).view(-1, 1) return points_mean.contiguous() def get_inputs(): return [torch.rand([64, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (64, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class VoxelFeatureExtractor(nn.Module): def __init__(self, **kwargs): super().__init__() def get_output_feature_dim(self): raise NotImplementedError def forward(self, **kwargs): raise NotImplementedError class MeanVoxelFeatureExtractorNew(VoxelFeatureExtractor): def __init__(self, **kwargs): super().__init__() def get_output_feature_dim(self): return cfg.DATA_CONFIG.NUM_POINT_FEATURES['use'] def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AndyYuan96/MVF-End-to-End-Multi-View-Fusion-for-3D-Object-Detection-in-LiDAR-Point-Clouds-
MeanVoxelFeatureExtractor
false
13,250
[ "Apache-2.0" ]
55
cf34897f25353a3f348d0a39c8db5ba15cadb2d7
https://github.com/AndyYuan96/MVF-End-to-End-Multi-View-Fusion-for-3D-Object-Detection-in-LiDAR-Point-Clouds-/tree/cf34897f25353a3f348d0a39c8db5ba15cadb2d7
Scale
import torch import torch.nn as nn import torch.utils.data class Scale(nn.Module): def __init__(self, init_value=1.0): super(Scale, self).__init__() self.scale = nn.Parameter(torch.FloatTensor([init_value])) def forward(self, input): return input * self.scale def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class ScaleNew(nn.Module): def __init__(self, init_value=1.0): super(ScaleNew, self).__init__() self.scale = nn.Parameter(torch.FloatTensor([init_value])) def forward(self, input_0): primals_1 = self.scale primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
AmorosTech/RP-R-CNN
Scale
false
13,251
[ "MIT" ]
78
45557a69ae9789e2662e3b937feb7624319a3e73
https://github.com/AmorosTech/RP-R-CNN/tree/45557a69ae9789e2662e3b937feb7624319a3e73
GaussianKernel
import torch import torch.nn as nn class GaussianKernel(nn.Module): """ Gaussian kernel module. :param mu: Float, mean of the kernel. :param sigma: Float, sigma of the kernel. Examples: >>> import torch >>> kernel = GaussianKernel() >>> x = torch.randn(4, 5, 10) >>> x.shape torch.Size([4, 5, 10]) >>> kernel(x).shape torch.Size([4, 5, 10]) """ def __init__(self, mu: 'float'=1.0, sigma: 'float'=1.0): """Gaussian kernel constructor.""" super().__init__() self.mu = mu self.sigma = sigma def forward(self, x): """Forward.""" return torch.exp(-0.5 * (x - self.mu) ** 2 / self.sigma ** 2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = -0.5 tmp5 = tmp3 * tmp4 tmp6 = tmp5 * tmp1 tmp7 = tl_math.exp(tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_mul_pow_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GaussianKernelNew(nn.Module): """ Gaussian kernel module. :param mu: Float, mean of the kernel. :param sigma: Float, sigma of the kernel. Examples: >>> import torch >>> kernel = GaussianKernel() >>> x = torch.randn(4, 5, 10) >>> x.shape torch.Size([4, 5, 10]) >>> kernel(x).shape torch.Size([4, 5, 10]) """ def __init__(self, mu: 'float'=1.0, sigma: 'float'=1.0): """Gaussian kernel constructor.""" super().__init__() self.mu = mu self.sigma = sigma def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Ambitioner-c/MatchZoo-py
GaussianKernel
false
13,252
[ "Apache-2.0" ]
468
bb088edce8e01c2c2326ca1a8ac647f0d23f088d
https://github.com/Ambitioner-c/MatchZoo-py/tree/bb088edce8e01c2c2326ca1a8ac647f0d23f088d
CoordLoss
import torch import torch.optim import torch.nn as nn class CoordLoss(nn.Module): def __init__(self): super(CoordLoss, self).__init__() def forward(self, coord_out, coord_gt, valid, is_3D=None): loss = torch.abs(coord_out - coord_gt) * valid if is_3D is not None: loss_z = loss[:, :, 2:] * is_3D[:, None, None].float() loss = torch.cat((loss[:, :, :2], loss_z), 2) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.optim import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp5 = tmp3 * tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_mul_sub_0[grid(256)](arg0_1, arg1_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class CoordLossNew(nn.Module): def __init__(self): super(CoordLossNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
Alan-delete/I2L-MeshNet_RELEASE
CoordLoss
false
13,253
[ "MIT" ]
544
22d63becc6f6e558e5180a8718dbaa8dde1cc6e5
https://github.com/Alan-delete/I2L-MeshNet_RELEASE/tree/22d63becc6f6e558e5180a8718dbaa8dde1cc6e5
ScaledDotProductAttention
import torch import numpy as np import torch.nn as nn class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super(ScaledDotProductAttention, self).__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, q, k, v, mask=None): attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature if mask is not None: attn = attn.masked_fill(mask, -np.inf) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, 
eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class ScaledDotProductAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super(ScaledDotProductAttentionNew, self).__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
Aleph0Inc/HDSA-Dialog
ScaledDotProductAttention
false
13,254
[ "MIT" ]
146
88e2604adb5dc38ae32205410b15b2ac39116ecd
https://github.com/Aleph0Inc/HDSA-Dialog/tree/88e2604adb5dc38ae32205410b15b2ac39116ecd
L1
import torch import torch.nn as nn class L1(nn.Module): def __init__(self): super(L1, self).__init__() def forward(self, output, target): lossvalue = torch.abs(output - target).mean() return lossvalue def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L1New(nn.Module): def __init__(self): super(L1New, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AnonymousAuthors444/VEC_VAD
L1
false
13,255
[ "MIT" ]
67
0072bf857030e621e2f9c12689407b81e45ed603
https://github.com/AnonymousAuthors444/VEC_VAD/tree/0072bf857030e621e2f9c12689407b81e45ed603
FCN_mse
import torch import torch.nn as nn class FCN_mse(nn.Module): """ Predict whether pixels are part of the object or the background. """ def __init__(self, n_class): super().__init__() self.n_class = n_class self.relu = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(3, 16, kernel_size=5, padding=2) self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=2) self.classifier = nn.Conv2d(32, 1, kernel_size=1) def forward(self, x): c1 = torch.tanh(self.conv1(x)) c2 = torch.tanh(self.conv2(c1)) score = self.classifier(c2) return score def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'n_class': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_convolution_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 16, 5, 5), 
(400, 25, 5, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (1, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_tanh_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_tanh_1[grid(524288)](buf3, primals_5, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(16384)](buf5, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3 class FCN_mseNew(nn.Module): """ Predict whether pixels are part of the object or the background. 
""" def __init__(self, n_class): super().__init__() self.n_class = n_class self.relu = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(3, 16, kernel_size=5, padding=2) self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=2) self.classifier = nn.Conv2d(32, 1, kernel_size=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.classifier.weight primals_7 = self.classifier.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
AZdet/causal-infogan
FCN_mse
false
13,256
[ "MIT" ]
89
146b647863a27542ad4a1a01ddb033cdcab9843d
https://github.com/AZdet/causal-infogan/tree/146b647863a27542ad4a1a01ddb033cdcab9843d
PositionwiseFeedForward
import torch import torch.nn as nn import torch.nn.functional as F class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) self.w_2 = nn.Conv1d(d_hid, d_in, 1) self.layer_norm = nn.LayerNorm(d_in) self.dropout = nn.Dropout(dropout) def forward(self, x): residual = x output = x.transpose(1, 2) output = self.w_2(F.relu(self.w_1(output))) output = output.transpose(1, 2) output = self.dropout(output) output = self.layer_norm(output + residual) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit 
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, 
num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf5 del buf6 del primals_7 return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4 class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super(PositionwiseFeedForwardNew, self).__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) self.w_2 = nn.Conv1d(d_hid, d_in, 1) self.layer_norm = nn.LayerNorm(d_in) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Aleph0Inc/HDSA-Dialog
PositionwiseFeedForward
false
13,257
[ "MIT" ]
146
88e2604adb5dc38ae32205410b15b2ac39116ecd
https://github.com/Aleph0Inc/HDSA-Dialog/tree/88e2604adb5dc38ae32205410b15b2ac39116ecd
Categorical
import torch import torch.nn as nn class Categorical(nn.Module): def __init__(self): super().__init__() def forward(self, log_p): return torch.multinomial(log_p.exp(), 1).long().squeeze(1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = torch.ops.aten.multinomial.default(buf0, 1) del buf0 buf2 = buf1 del buf1 return reinterpret_tensor(buf2, (4,), (1,), 0), class CategoricalNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ArChiiii/TSP_DRL_PtrNet
Categorical
false
13,258
[ "MIT" ]
59
8218a508c563d9641b341dff5a6241d90e4e031b
https://github.com/ArChiiii/TSP_DRL_PtrNet/tree/8218a508c563d9641b341dff5a6241d90e4e031b
GatedConv2d
import torch import torch.nn as nn import torch.utils.data class GatedConv2d(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None): super(GatedConv2d, self).__init__() self.activation = activation self.sigmoid = nn.Sigmoid() self.h = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation) self.g = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation) def forward(self, x): if self.activation is None: h = self.h(x) else: h = self.activation(self.h(x)) g = self.sigmoid(self.g(x)) return h * g def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4, 'output_channels': 4, 'kernel_size': 4, 'stride': 1, 'padding': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp5, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1)) buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1)) buf1 = buf0 del buf0 
buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_mul_sigmoid_0[grid(1296)](buf1, buf3, primals_2, primals_5, buf4, 1296, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_5 return buf4, primals_1, primals_3, primals_4, buf1, buf3 class GatedConv2dNew(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None): super(GatedConv2dNew, self).__init__() self.activation = activation self.sigmoid = nn.Sigmoid() self.h = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation) self.g = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation) def forward(self, input_0): primals_1 = self.h.weight primals_2 = self.h.bias primals_3 = self.g.weight primals_5 = self.g.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
AWehenkel/UMNN
GatedConv2d
false
13,259
[ "BSD-3-Clause" ]
69
f93cb36040783dd60e14e0eda927899d3919825c
https://github.com/AWehenkel/UMNN/tree/f93cb36040783dd60e14e0eda927899d3919825c
ProdAttention
import torch import torch.nn as nn import torch.optim class ProdAttention(nn.Module): def __init__(self): super(ProdAttention, self).__init__() def forward(self, eh, dhx, ax=None): pax = eh * dhx pax = torch.sum(pax, dim=2) ax = nn.functional.softmax(pax, dim=1) sx = ax.unsqueeze(2) sx = torch.sum(eh * sx, dim=1, keepdim=True) return sx, ax def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 
'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask) tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask) tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): arg0_1, arg1_1 = args 
args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0) del buf1 triton_poi_fused_mul_sum_3[grid(64)](arg0_1, buf2, buf3, 64, XBLOCK =64, num_warps=1, num_stages=1) del arg0_1 return buf3, buf2 class ProdAttentionNew(nn.Module): def __init__(self): super(ProdAttentionNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
AminJun/speech
ProdAttention
false
13,260
[ "Apache-2.0" ]
642
95149ca3780d8590a36d8f1adeb8d6508a0ff1cc
https://github.com/AminJun/speech/tree/95149ca3780d8590a36d8f1adeb8d6508a0ff1cc
L1_Charbonnier_loss
import torch import torch.nn as nn class L1_Charbonnier_loss(nn.Module): """L1 Charbonnierloss.""" def __init__(self): super(L1_Charbonnier_loss, self).__init__() self.eps = 1e-06 def forward(self, X, Y): diff = torch.add(X, -Y) error = torch.sqrt(diff * diff + self.eps) loss = torch.sum(error) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mul_neg_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = -tmp1 tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp5 = 1e-06 tmp6 = tmp4 + tmp5 tmp7 = libdevice.sqrt(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_mul_neg_sqrt_sum_0[grid(1)](arg1_1, arg0_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class L1_Charbonnier_lossNew(nn.Module): """L1 Charbonnierloss.""" def __init__(self): super(L1_Charbonnier_lossNew, self).__init__() self.eps = 1e-06 def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AnimatedRNG/pytorch-LapSRN
L1_Charbonnier_loss
false
13,261
[ "MIT" ]
270
1b7737abe6ccaef2d14b673d301edbace3414c02
https://github.com/AnimatedRNG/pytorch-LapSRN/tree/1b7737abe6ccaef2d14b673d301edbace3414c02
MaxPoolStride1
import torch import torch.nn as nn import torch.nn.functional as F class MaxPoolStride1(nn.Module): def __init__(self, kernel_size): super(MaxPoolStride1, self).__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, x): padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode='replicate') pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x) return pooled_x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp5 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * ( 1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * ( 1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * (1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp13 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1 ) * (1 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') 
tmp15 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp21 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1 ) * (2 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x4, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxPoolStride1New(nn.Module): def __init__(self, kernel_size): super(MaxPoolStride1New, self).__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AlexRogalskiy/smart-social-distancing
MaxPoolStride1
false
13,262
[ "Apache-2.0" ]
113
2def6738038035e67ac79fc9b72ba072e190321f
https://github.com/AlexRogalskiy/smart-social-distancing/tree/2def6738038035e67ac79fc9b72ba072e190321f
FocalLoss
import torch import torch.nn as nn import torch.nn.functional as F class FocalLoss(nn.modules.loss._WeightedLoss): def __init__(self, weight=None, gamma=2, reduction='mean'): super(FocalLoss, self).__init__(weight, reduction=reduction) self.gamma = gamma self.weight = weight def forward(self, input, target): ce_loss = F.cross_entropy(input, target, reduction=self.reduction, weight=self.weight) pt = torch.exp(-ce_loss) focal_loss = ((1 - pt) ** self.gamma * ce_loss).mean() return focal_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, 
eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = -tmp21 tmp23 = tl_math.exp(tmp22) tmp24 = 1.0 tmp25 = tmp24 - tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp26 * tmp21 tmp28 = tmp27 / tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1[grid (1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class FocalLossNew(nn.modules.loss._WeightedLoss): def __init__(self, weight=None, gamma=2, reduction='mean'): super(FocalLossNew, self).__init__(weight, reduction=reduction) self.gamma = gamma self.weight = weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AnassBenBouazza/Project-calibration-temperature_scaling
FocalLoss
false
13,263
[ "MIT" ]
724
cf96350f5e4349404fa092a97a71baf2bb7686ec
https://github.com/AnassBenBouazza/Project-calibration-temperature_scaling/tree/cf96350f5e4349404fa092a97a71baf2bb7686ec
Attn
import math import torch import torch.nn as nn import torch.nn.functional as F class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = method self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def forward(self, hidden, encoder_outputs): """ :param hidden: previous hidden state of the decoder, in shape (layers*directions,B,H) :param encoder_outputs: encoder outputs from Encoder, in shape (T,B,H) :return attention energies in shape (B,T) """ max_len = encoder_outputs.size(0) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) encoder_outputs = encoder_outputs.transpose(0, 1) attn_energies = self.score(H, encoder_outputs) return F.softmax(attn_energies, dim=1).unsqueeze(1) def score(self, hidden, encoder_outputs): cat = torch.cat([hidden, encoder_outputs], 2) energy = torch.tanh(self.attn(cat)) energy = energy.transpose(2, 1) v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'method': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x1 = xindex // 8 % 4 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, 
eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_5, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused__softmax_4[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf2, buf6, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0) class AttnNew(nn.Module): def __init__(self, method, hidden_size): super(AttnNew, self).__init__() self.method = method self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def score(self, hidden, encoder_outputs): cat = torch.cat([hidden, encoder_outputs], 2) energy = torch.tanh(self.attn(cat)) energy = energy.transpose(2, 1) v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def forward(self, input_0, input_1): primals_4 
= self.v primals_3 = self.attn.weight primals_5 = self.attn.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Aleph0Inc/HDSA-Dialog
Attn
false
13,264
[ "MIT" ]
146
88e2604adb5dc38ae32205410b15b2ac39116ecd
https://github.com/Aleph0Inc/HDSA-Dialog/tree/88e2604adb5dc38ae32205410b15b2ac39116ecd
MultiHeadedAttention
import math import torch from torch import Tensor import torch.nn as nn class MultiHeadedAttention(nn.Module): """ Multi-Head Attention module from "Attention is All You Need" Implementation modified from OpenNMT-py. https://github.com/OpenNMT/OpenNMT-py """ def __init__(self, num_heads: 'int', size: 'int', dropout: 'float'=0.1): """ Create a multi-headed attention layer. :param num_heads: the number of heads :param size: model size (must be divisible by num_heads) :param dropout: probability of dropping a unit """ super().__init__() assert size % num_heads == 0 self.head_size = head_size = size // num_heads self.model_size = size self.num_heads = num_heads self.k_layer = nn.Linear(size, num_heads * head_size) self.v_layer = nn.Linear(size, num_heads * head_size) self.q_layer = nn.Linear(size, num_heads * head_size) self.output_layer = nn.Linear(size, size) self.softmax = nn.Softmax(dim=-1) self.dropout = nn.Dropout(dropout) def forward(self, k: 'Tensor', v: 'Tensor', q: 'Tensor', mask: 'Tensor' =None): """ Computes multi-headed attention. :param k: keys [B, M, D] with M being the sentence length. 
:param v: values [B, M, D] :param q: query [B, M, D] :param mask: optional mask [B, 1, M] :return: """ batch_size = k.size(0) num_heads = self.num_heads k = self.k_layer(k) v = self.v_layer(v) q = self.q_layer(q) k = k.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2) v = v.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2) q = q.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2) q = q / math.sqrt(self.head_size) scores = torch.matmul(q, k.transpose(2, 3)) if mask is not None: scores = scores.masked_fill(~mask.unsqueeze(1), float('-inf')) attention = self.softmax(scores) attention = self.dropout(attention) context = torch.matmul(attention, v) context = context.transpose(1, 2).contiguous().view(batch_size, -1, num_heads * self.head_size) output = self.output_layer(context) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_heads': 4, 'size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

# TorchInductor guard / allocation helpers used by the generated call() below.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Fused bias-add + scale for the query projection, written into the
    # head-split clone layout. The scale is q / sqrt(head_size); with
    # head_size == 1 it is the literal 1.0 below.
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
    # in_ptr1 is the per-output-feature bias (indexed by y0 only).
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Fused bias-add + relayout into the head-split clone buffer (same as
    # the kernel above, without the scaling multiply). Reused for both the
    # key and value projections.
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)


@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Numerically stable softmax over the last dimension (16 scores per
    # row): subtract the row max, exponentiate, normalize by the row sum.
    xnumel = 256
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, float('-inf'))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Transpose the per-head context back into the merged [B, M, H*head]
    # layout expected by the output projection.
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 16
    y1 = yindex // 16
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    # Generated forward for multi-head attention (no mask, dropout is a
    # no-op at p applied as identity here): three input projections (mm),
    # Q*K^T (bmm), softmax, attn*V (bmm), output projection (addmm).
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # k = k_layer(k) as a flat (64, 4) matmul; bias is added later in
        # the fused clone kernels.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        # v = v_layer(v)
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        # q = q_layer(q)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
        del primals_7
        # Query: bias-add + 1/sqrt(head_size) scale + head-split layout.
        buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_div_0[grid(16, 16)](buf2, primals_8, buf3,
            16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_8
        # Key: bias-add + head-split layout (reuses buf2's storage).
        buf4 = reinterpret_tensor(buf2, (4, 4, 1, 16), (64, 16, 16, 1), 0)
        del buf2
        triton_poi_fused_clone_1[grid(16, 16)](buf0, primals_3, buf4, 16,
            16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        # scores = q @ k^T as a batched matmul over (batch*heads).
        buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
            0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
        # attention = softmax(scores)
        buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
            .float32)
        triton_per_fused__softmax_2[grid(256)](buf5, buf8, 256, 16, XBLOCK=
            32, num_warps=4, num_stages=1)
        del buf5
        # Value: bias-add + head-split layout (reuses buf0's storage).
        buf9 = reinterpret_tensor(buf0, (4, 4, 16, 1), (64, 16, 1, 1), 0)
        del buf0
        triton_poi_fused_clone_1[grid(16, 16)](buf1, primals_5, buf9, 16,
            16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_5
        # context = attention @ v
        buf10 = reinterpret_tensor(buf1, (16, 16, 1), (16, 1, 1), 0)
        del buf1
        extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16,
            1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0),
            out=buf10)
        # Merge heads back into the feature dimension.
        buf11 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_3[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
            YBLOCK=32, num_warps=4, num_stages=1)
        # output = output_layer(context) (addmm fuses the bias).
        buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
        del buf10
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf12)
        del primals_11
    # First element is the attention output; the rest are saved tensors for
    # the backward pass.
    return reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0
        ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
        ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
        ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
        ), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0
        ), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0
        ), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
        ), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)


class MultiHeadedAttentionNew(nn.Module):
    """
    Multi-Head Attention module from "Attention is All You Need"

    Implementation modified from OpenNMT-py.
    https://github.com/OpenNMT/OpenNMT-py
    """

    def __init__(self, num_heads: 'int', size: 'int', dropout: 'float'=0.1):
        """
        Create a multi-headed attention layer.

        :param num_heads: the number of heads
        :param size: model size (must be divisible by num_heads)
        :param dropout: probability of dropping a unit
        """
        super().__init__()
        assert size % num_heads == 0
        self.head_size = head_size = size // num_heads
        self.model_size = size
        self.num_heads = num_heads
        self.k_layer = nn.Linear(size, num_heads * head_size)
        self.v_layer = nn.Linear(size, num_heads * head_size)
        self.q_layer = nn.Linear(size, num_heads * head_size)
        self.output_layer = nn.Linear(size, size)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_0, input_1, input_2):
        # Gather parameters and inputs in the positional order expected by
        # the generated call(); input_0/1/2 are keys/values/queries.
        primals_2 = self.k_layer.weight
        primals_3 = self.k_layer.bias
        primals_4 = self.v_layer.weight
        primals_5 = self.v_layer.bias
        primals_7 = self.q_layer.weight
        primals_8 = self.q_layer.bias
        primals_10 = self.output_layer.weight
        primals_11 = self.output_layer.bias
        primals_1 = input_0
        primals_6 = input_1
        primals_9 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
AmitMY/joeynmt
MultiHeadedAttention
false
13,265
[ "Apache-2.0" ]
563
b30d1d53823ced56113def8fb5d5f7905d3c059f
https://github.com/AmitMY/joeynmt/tree/b30d1d53823ced56113def8fb5d5f7905d3c059f
SiLU
import torch
import torch as th
import torch.nn as nn


class SiLU(nn.Module):
    """Sigmoid Linear Unit activation: ``x * sigmoid(x)``."""

    def forward(self, x):
        gate = th.sigmoid(x)
        return x.mul(gate)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

# TorchInductor guard / allocation helpers used by the generated call() below.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Elementwise fused SiLU: out = x * sigmoid(x) over 256 elements.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    # Generated wrapper: validate the input layout, allocate the output on
    # CUDA, and launch the fused kernel over all 256 elements.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
            =256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SiLUNew(nn.Module):
    # Drop-in SiLU module that routes through the generated Triton kernel.

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
AranKomat/Diff-DALLE
SiLU
false
13,266
[ "MIT" ]
53
9418e98e97b599c5c65f16ee168fedf76a29095f
https://github.com/AranKomat/Diff-DALLE/tree/9418e98e97b599c5c65f16ee168fedf76a29095f
L2
import torch
import torch.nn as nn


class L2(nn.Module):
    """Mean of per-element L2 (Euclidean) distances taken along dim 1."""

    def __init__(self):
        super(L2, self).__init__()

    def forward(self, output, target):
        diff = output - target
        distances = diff.norm(p=2, dim=1)
        return distances.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

# TorchInductor guard / allocation helpers used by the generated call() below.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Single-program reduction fusing: (output - target), squared sum over
    # the 4 channels at dim 1, sqrt (the L2 norm), then the mean over the
    # remaining 64 positions.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    # The four loads per input correspond to the four entries along dim 1
    # (stride 16 within each 64-element batch block).
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = libdevice.sqrt(tmp18)
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
    tmp22 = tl.sum(tmp20, 1)[:, None]
    # Divide by the 64 norm values to produce the mean (a scalar).
    tmp23 = 64.0
    tmp24 = tmp22 / tmp23
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None)


def call(args):
    # Generated wrapper: one fused reduction kernel produces the scalar loss.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_linalg_vector_norm_mean_sub_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class L2New(nn.Module):
    # Drop-in L2 loss module routed through the generated Triton kernel.

    def __init__(self):
        super(L2New, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
AnonymousAuthors444/VEC_VAD
L2
false
13,267
[ "MIT" ]
67
0072bf857030e621e2f9c12689407b81e45ed603
https://github.com/AnonymousAuthors444/VEC_VAD/tree/0072bf857030e621e2f9c12689407b81e45ed603
Flatten
import torch
from torch import nn
from torch.autograd import *
from itertools import product as product
from math import sqrt as sqrt


class Flatten(nn.Module):
    """Swap the last two dimensions, then flatten everything but batch."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        swapped = x.transpose(3, 2).contiguous()
        batch = swapped.size(0)
        return swapped.view(batch, -1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.autograd import *
from itertools import product as product
from math import sqrt as sqrt

# TorchInductor guard / allocation helpers used by the generated call() below.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Materializes the transpose(3, 2) as a contiguous copy: read with the
    # swapped index (y0 + 4 * x2) and write in natural order.
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    # Generated wrapper: copy-transpose into a fresh buffer, then view the
    # result as the flattened (4, 64) output without another copy.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
            YBLOCK=32, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 64), (64, 1), 0),


class FlattenNew(nn.Module):
    # Drop-in Flatten module routed through the generated Triton kernel.

    def __init__(self):
        super(FlattenNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Aristochi/Dangerous_driving_behavior_detection
Flatten
false
13,268
[ "MIT" ]
96
596d0544c3ed8cbfbc322cc4cd7859a9ef539810
https://github.com/Aristochi/Dangerous_driving_behavior_detection/tree/596d0544c3ed8cbfbc322cc4cd7859a9ef539810
ScaledLeakyReLU
import math
import torch
from torch import nn
import torch.nn.functional as F


class ScaledLeakyReLU(nn.Module):
    """Leaky ReLU whose output is rescaled by sqrt(2)."""

    def __init__(self, negative_slope=0.2):
        super().__init__()
        self.negative_slope = negative_slope

    def forward(self, input):
        activated = F.leaky_relu(input, negative_slope=self.negative_slope)
        return math.sqrt(2) * activated
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

# TorchInductor guard / allocation helpers used by the generated call() below.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_leaky_relu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # Fused elementwise: leaky_relu with slope 0.2, then multiply by
    # sqrt(2) (the literal 1.4142...). Slope is baked in at compile time.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = 1.4142135623730951
    tmp7 = tmp5 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    # Generated wrapper: allocate the output and launch the fused kernel.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_mul_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ScaledLeakyReLUNew(nn.Module):
    # Drop-in scaled leaky ReLU routed through the generated Triton kernel.
    # NOTE(review): negative_slope is stored but the kernel hard-codes 0.2,
    # so non-default slopes are ignored by this compiled path.

    def __init__(self, negative_slope=0.2):
        super().__init__()
        self.negative_slope = negative_slope

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
ArashVahabpour/encoder4editing-contrastive
ScaledLeakyReLU
false
13,269
[ "MIT" ]
1,051
1b91afe1693e01a41118e1ce2451b7d14bec51f4
https://github.com/ArashVahabpour/encoder4editing-contrastive/tree/1b91afe1693e01a41118e1ce2451b7d14bec51f4
LocalConv2d
import torch
import torch.nn as nn
import torch.nn.functional as F


class LocalConv2d(nn.Module):
    """Row-local 2D convolution: the input is split into `num_rows`
    horizontal bands and each band gets its own convolution filters,
    implemented as a single grouped Conv2d over stacked bands."""

    def __init__(self, num_rows, num_feats_in, num_feats_out, kernel=1,
        padding=0):
        super(LocalConv2d, self).__init__()
        self.num_rows = num_rows
        self.out_channels = num_feats_out
        self.kernel = kernel
        self.pad = padding
        # One filter group per band: group g sees only band g's channels.
        self.group_conv = nn.Conv2d(num_feats_in * num_rows, num_feats_out *
            num_rows, kernel, stride=1, groups=num_rows)

    def forward(self, x):
        batch, chans, height, width = x.size()
        if self.pad:
            x = F.pad(x, (self.pad, self.pad, self.pad, self.pad), mode=
                'constant', value=0)
        # Height of one band (pre-padding height / number of bands).
        band_h = int(height / self.num_rows)
        # Cut the (padded) height into overlapping band windows, then stack
        # the bands into the channel dimension for the grouped conv.
        windows = x.unfold(2, band_h + self.pad * 2, band_h)
        windows = windows.permute([0, 2, 1, 4, 3]).contiguous()
        stacked = windows.view(batch, chans * self.num_rows, band_h + self.
            pad * 2, width + self.pad * 2).contiguous()
        out = self.group_conv(stacked)
        # Unstack bands from channels and reassemble the full height.
        out = out.view(batch, self.num_rows, self.out_channels, band_h, width
            ).contiguous()
        out = out.permute([0, 2, 1, 3, 4]).contiguous()
        return out.view(batch, self.out_channels, height, width)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_rows': 4, 'num_feats_in': 4, 'num_feats_out': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

# TorchInductor guard / allocation helpers used by the generated call() below.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Pre-conv relayout: gathers the unfold+permute of the input into a
    # contiguous buffer whose channel dim stacks the row bands.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tl.store(out_ptr0 + x4, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # Post-conv: adds the per-channel bias (in_ptr1) and scatters the
    # result back into the original band-interleaved layout.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x5 = xindex // 4 % 16
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp2, xmask)


def call(args):
    # Generated forward for the padding=0, kernel=1 case: relayout, grouped
    # convolution via the extern kernel (bias fused into the second Triton
    # kernel), then relayout back.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (16,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 4, 1), torch
            .float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_1
        # Grouped conv (groups=4, one group per row band); bias added later.
        buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 16,
            1, 4), (64, 4, 0, 1), 0), primals_2, stride=(1, 1), padding=(0,
            0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=4, bias=None)
        assert_size_stride(buf1, (4, 16, 1, 4), (64, 4, 4, 1))
        buf2 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 4, 1), torch
            .float32)
        triton_poi_fused_clone_1[grid(256)](buf1, primals_3, buf2, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_3
    # First element is the (4, 4, 4, 4) output; the rest are saved for
    # the backward pass.
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), primals_2, reinterpret_tensor(buf0, (4, 16, 1, 4), (64, 4, 4, 1), 0)


class LocalConv2dNew(nn.Module):
    # Drop-in row-local conv module routed through the generated kernels.

    def __init__(self, num_rows, num_feats_in, num_feats_out, kernel=1,
        padding=0):
        super(LocalConv2dNew, self).__init__()
        self.num_rows = num_rows
        self.out_channels = num_feats_out
        self.kernel = kernel
        self.pad = padding
        self.group_conv = nn.Conv2d(num_feats_in * num_rows, num_feats_out *
            num_rows, kernel, stride=1, groups=num_rows)

    def forward(self, input_0):
        primals_2 = self.group_conv.weight
        primals_3 = self.group_conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
AnuragSahu/M3D-RPN
LocalConv2d
false
13,270
[ "MIT" ]
245
078ddfa0a7c48dc1d23e8da679997239ac62a72a
https://github.com/AnuragSahu/M3D-RPN/tree/078ddfa0a7c48dc1d23e8da679997239ac62a72a