Dataset columns (name, type, observed value range):

entry_point                   string   length 1 to 65
original_triton_python_code   string   length 208 to 619k
optimised_triton_code         string   length 1.15k to 275k
repo_name                     string   length 7 to 115
module_name                   string   length 1 to 65
synthetic                     bool     1 class
uuid                          int64    0 to 18.5k
licenses                      list     length 1 to 6
stars                         int64    0 to 19.8k
sha                           string   length 40 (fixed)
repo_link                     string   length 72 to 180
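The listing above is a flattened column summary for a tabular dataset whose rows pair an eager PyTorch module (original_triton_python_code) with the Triton code that torch Inductor generated for it (optimised_triton_code), plus provenance metadata. As a minimal sketch of how such rows could be consumed, assuming they are exported to a JSON Lines file at a hypothetical path (the dump does not name the dataset or any file), iteration might look like this:

import json

# "triton_modules.jsonl" is a hypothetical export path used only for illustration;
# the column names below are the ones listed in the schema above.
with open("triton_modules.jsonl") as f:
    for line in f:
        row = json.loads(line)
        print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
        original = row["original_triton_python_code"]   # eager PyTorch reference module
        optimised = row["optimised_triton_code"]        # Inductor-generated Triton kernels + call()
        assert len(row["sha"]) == 40                    # full git commit hash
        assert isinstance(row["synthetic"], bool)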
entry_point: ExtendedModel
import torch import torch.nn as nn class ExtendedModel(nn.Module): def __init__(self, D_in, H, D_out): """ In the constructor we instantiate two nn.Linear modules and assign them as member variables. """ super(ExtendedModel, self).__init__() self.linear1 = nn.Linear(D_in, H) self.linear2 = nn.Linear(H, D_out) def forward(self, x, bias=0.0): """ In the forward function we accept a Tensor of input data and an optional bias """ h_relu = self.linear1(x).clamp(min=0) y_pred = self.linear2(h_relu) return y_pred + bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'D_in': 4, 'H': 4, 'D_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_0[grid(256)](buf0, primals_2, buf1, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_1[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf4 class ExtendedModelNew(nn.Module): def __init__(self, D_in, H, D_out): """ In the constructor we instantiate two nn.Linear modules and assign them as member variables. """ super(ExtendedModelNew, self).__init__() self.linear1 = nn.Linear(D_in, H) self.linear2 = nn.Linear(H, D_out) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
repo_name: SID262000/BentoML
module_name: ExtendedModel
synthetic: false
uuid: 9,419
licenses: ["Apache-2.0"]
stars: 0
sha: 0708a6495e4d1f0ddf639026be768abf2d55410a
repo_link: https://github.com/SID262000/BentoML/tree/0708a6495e4d1f0ddf639026be768abf2d55410a
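Each record in this dump pairs an eager PyTorch module with an Inductor-generated counterpart whose forward delegates to the compiled call(...) function. As a minimal sketch of how the two definitions in this first record could be cross-checked, assuming the two code cells above are saved together in a hypothetical extended_model_record.py and a CUDA device is available (the generated kernels allocate CUDA buffers), one might do the following; the same pattern applies to the later records:

import torch

# Hypothetical module name: assumes the two code cells above were saved as
# extended_model_record.py; the Inductor call() path requires a CUDA device.
from extended_model_record import (ExtendedModel, ExtendedModelNew,
                                   get_inputs, get_init_inputs)

init_args, init_kwargs = get_init_inputs()
ref = ExtendedModel(*init_args, **init_kwargs).cuda()
new = ExtendedModelNew(*init_args, **init_kwargs).cuda()
new.load_state_dict(ref.state_dict())  # give both modules identical weights

x = get_inputs()[0].cuda()
with torch.no_grad():
    torch.testing.assert_close(ref(x), new(x), rtol=1e-4, atol=1e-4)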
entry_point: Conv2dBlock
import torch from torch import nn import torch.nn.functional as F class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class Conv2dBlock(nn.Module): def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none', activation='relu', pad_type='zero', use_bias=True, activation_first =False): super(Conv2dBlock, self).__init__() self.use_bias = use_bias self.activation_first = activation_first if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = out_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=False) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=False) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias) def forward(self, x): if self.activation_first: if self.activation: x = self.activation(x) x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) else: x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4, 'ks': 4, 'st': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf1, primals_1, primals_2, buf2 class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. 
weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class Conv2dBlockNew(nn.Module): def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none', activation='relu', pad_type='zero', use_bias=True, activation_first =False): super(Conv2dBlockNew, self).__init__() self.use_bias = use_bias self.activation_first = activation_first if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = out_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=False) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=False) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: PredatorK9/GANwriting
module_name: Conv2dBlock
synthetic: false
uuid: 9,420
licenses: ["MIT"]
stars: 0
sha: 246d7e87152c98f0c6af999d619dc51190fad8ae
repo_link: https://github.com/PredatorK9/GANwriting/tree/246d7e87152c98f0c6af999d619dc51190fad8ae
entry_point: PreprocessAtari
import torch from torch import nn class PreprocessAtari(nn.Module): def forward(self, x): x = x.permute(0, 3, 1, 2).contiguous() return x / 255.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_div_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 return buf0, class PreprocessAtariNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: SemyonSemenov/mipt-rl-hw-2022
module_name: PreprocessAtari
synthetic: false
uuid: 9,421
licenses: ["MIT"]
stars: 0
sha: 923fd0b7e3f900c1a91ddf256c9b6f53a62d1653
repo_link: https://github.com/SemyonSemenov/mipt-rl-hw-2022/tree/923fd0b7e3f900c1a91ddf256c9b6f53a62d1653
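The fused kernel in this record folds the channel permute and the division by 255 into one pass; its constant 0.00392156862745098 is just 1/255 written out. A quick CPU check that multiplying by that constant matches the eager divide (a sketch, independent of the Triton code):

import torch

# 0.00392156862745098 is 1/255 written out, so the multiply matches the eager
# divide to within a rounding error well inside assert_close's default tolerance.
x = torch.rand(4, 4, 4, 4)
eager = x.permute(0, 3, 1, 2).contiguous() / 255.0
fused_equiv = x.permute(0, 3, 1, 2).contiguous() * 0.00392156862745098
torch.testing.assert_close(eager, fused_equiv)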
entry_point: HardSigmoid
import torch from torch import nn import torch.nn.functional as F class HardSigmoid(nn.Module): def __init__(self, slope=0.2, offset=0.5): super().__init__() self.slope = slope self.offset = offset def forward(self, x): x = self.slope * x + self.offset x = F.threshold(-x, -1, -1) x = F.threshold(-x, 0, 0) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_neg_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.2 tmp2 = tmp0 * tmp1 tmp3 = 0.5 tmp4 = tmp2 + tmp3 tmp5 = -tmp4 tmp6 = -1.0 tmp7 = tmp5 <= tmp6 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = -tmp8 tmp10 = 0.0 tmp11 = tmp9 <= tmp10 tmp12 = tl.where(tmp11, tmp10, tmp9) tl.store(out_ptr0 + x0, tmp12, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_neg_threshold_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class HardSigmoidNew(nn.Module): def __init__(self, slope=0.2, offset=0.5): super().__init__() self.slope = slope self.offset = offset def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: LDOUBLEV/DBNet.pytorch
module_name: HardSigmoid
synthetic: false
uuid: 9,422
licenses: ["Apache-2.0"]
stars: 0
sha: 206f4a1e5cc3686284476f029a26fc69f610e898
repo_link: https://github.com/LDOUBLEV/DBNet.pytorch/tree/206f4a1e5cc3686284476f029a26fc69f610e898
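The eager forward in this record builds the activation out of two F.threshold calls on negated inputs, and the fused Triton kernel reproduces that with its two <= / tl.where steps. The whole construction is a hard sigmoid, i.e. clamp(slope * x + offset, 0, 1). A quick CPU check of that equivalence (a sketch, independent of the Triton code):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
y = 0.2 * x + 0.5
y = F.threshold(-y, -1, -1)   # -y thresholded at -1: equivalent to -min(y, 1)
y = F.threshold(-y, 0, 0)     # negate back and threshold at 0: max(min(y, 1), 0)
torch.testing.assert_close(y, torch.clamp(0.2 * x + 0.5, 0.0, 1.0))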
entry_point: MaskL1Loss
import torch from torch import nn class MaskL1Loss(nn.Module): def __init__(self, eps=1e-06): super(MaskL1Loss, self).__init__() self.eps = eps def forward(self, pred: 'torch.Tensor', gt, mask): loss = (torch.abs(pred - gt) * mask).sum() / (mask.sum() + self.eps) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp5 = tmp3 * tmp4 tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp4, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 1e-06 tmp13 = tmp11 + tmp12 tmp14 = tmp8 / tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_mul_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class MaskL1LossNew(nn.Module): def __init__(self, eps=1e-06): super(MaskL1LossNew, self).__init__() self.eps = eps def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
repo_name: LDOUBLEV/DBNet.pytorch
module_name: MaskL1Loss
synthetic: false
uuid: 9,423
licenses: ["Apache-2.0"]
stars: 0
sha: 206f4a1e5cc3686284476f029a26fc69f610e898
repo_link: https://github.com/LDOUBLEV/DBNet.pytorch/tree/206f4a1e5cc3686284476f029a26fc69f610e898
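For reference, the single reduction kernel in this record evaluates loss = sum(|pred - gt| * mask) / (sum(mask) + eps) over all 256 elements in one pass. A small CPU sanity check of that formula (a sketch; with an all-ones mask it collapses to a scaled mean absolute error):

import torch

# Sketch of the formula the fused kernel implements, checked on CPU with mask = 1.
pred, gt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
mask = torch.ones(4, 4, 4, 4)
eps = 1e-06
loss = (torch.abs(pred - gt) * mask).sum() / (mask.sum() + eps)
expected = torch.abs(pred - gt).mean() * (256.0 / (256.0 + eps))
torch.testing.assert_close(loss, expected)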
entry_point: ResBlock
import torch import torch.nn as nn from torch.nn import functional as F class ResBlock(nn.Module): """Residual block with upsampling/downsampling. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. """ def __init__(self, in_channels, out_channels, mode='down'): super(ResBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1) self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1) self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False) if mode == 'down': self.scale_factor = 0.5 elif mode == 'up': self.scale_factor = 2 def forward(self, x): out = F.leaky_relu_(self.conv1(x), negative_slope=0.2) out = F.interpolate(out, scale_factor=self.scale_factor, mode= 'bilinear', align_corners=False) out = F.leaky_relu_(self.conv2(out), negative_slope=0.2) x = F.interpolate(x, scale_factor=self.scale_factor, mode= 'bilinear', align_corners=False) skip = self.skip(x) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x5 = xindex // 4 x2 = xindex // 4 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp48 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.2 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp18 = tmp17 + tmp1 tmp19 = tmp17 < 0 tmp20 = tl.where(tmp19, tmp18, tmp17) tmp21 = tl.load(in_ptr2 + (tmp20 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp22 = tmp21 + 
tmp10 tmp23 = tmp22 > tmp12 tmp24 = tmp22 * tmp14 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tmp25 - tmp16 tmp28 = tmp26 * tmp27 tmp29 = tmp16 + tmp28 tmp31 = tmp30 + tmp1 tmp32 = tmp30 < 0 tmp33 = tl.where(tmp32, tmp31, tmp30) tmp34 = tl.load(in_ptr2 + (tmp8 + 4 * tmp33 + 16 * x5), xmask, eviction_policy='evict_last') tmp35 = tmp34 + tmp10 tmp36 = tmp35 > tmp12 tmp37 = tmp35 * tmp14 tmp38 = tl.where(tmp36, tmp35, tmp37) tmp39 = tl.load(in_ptr2 + (tmp20 + 4 * tmp33 + 16 * x5), xmask, eviction_policy='evict_last') tmp40 = tmp39 + tmp10 tmp41 = tmp40 > tmp12 tmp42 = tmp40 * tmp14 tmp43 = tl.where(tmp41, tmp40, tmp42) tmp44 = tmp43 - tmp38 tmp45 = tmp44 * tmp27 tmp46 = tmp38 + tmp45 tmp47 = tmp46 - tmp29 tmp49 = tmp47 * tmp48 tmp50 = tmp29 + tmp49 tmp51 = tl.load(in_ptr8 + (tmp8 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp52 = tl.load(in_ptr8 + (tmp20 + 4 * tmp4 + 16 * x5), xmask, eviction_policy='evict_last') tmp53 = tmp52 - tmp51 tmp54 = tmp53 * tmp27 tmp55 = tmp51 + tmp54 tmp56 = tl.load(in_ptr8 + (tmp8 + 4 * tmp33 + 16 * x5), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr8 + (tmp20 + 4 * tmp33 + 16 * x5), xmask, eviction_policy='evict_last') tmp58 = tmp57 - tmp56 tmp59 = tmp58 * tmp27 tmp60 = tmp56 + tmp59 tmp61 = tmp60 - tmp55 tmp62 = tmp61 * tmp48 tmp63 = tmp55 + tmp62 tl.store(in_out_ptr0 + x4, tmp50, xmask) tl.store(in_out_ptr1 + x4, tmp63, xmask) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp9 = tmp7 + tmp8 tmp10 = tmp7 > tmp3 tl.store(in_out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((2, 1), (1, 1), torch.int64) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(2)](buf1, 2, XBLOCK=2, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((2, 1), (1, 1), torch.int64) 
triton_poi_fused_add_clamp_1[grid(2)](buf2, 2, XBLOCK=2, num_warps= 1, num_stages=1) buf3 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused__to_copy_0[grid(2)](buf3, 2, XBLOCK=2, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused_add_clamp_1[grid(2)](buf4, 2, XBLOCK=2, num_warps= 1, num_stages=1) buf5 = empty_strided_cuda((2,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(2)](buf5, 2, XBLOCK=2, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((2, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(2)](buf7, 2, XBLOCK=2, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf9 = buf8 del buf8 buf11 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf12 = buf11 del buf11 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3[ grid(64)](buf9, buf12, buf1, buf3, buf0, primals_2, buf4, buf5, buf2, buf7, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 2, 2), (16, 4, 2, 1)) buf13 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 4, 2, 2), (16, 4, 2, 1)) buf14 = buf13 del buf13 buf15 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4[grid (64)](buf14, buf10, primals_5, buf15, 64, XBLOCK=64, num_warps= 1, num_stages=1) del buf10 del primals_5 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256) ](buf0, primals_2, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf12, buf15, buf16) class ResBlockNew(nn.Module): """Residual block with upsampling/downsampling. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. """ def __init__(self, in_channels, out_channels, mode='down'): super(ResBlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1) self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1) self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False) if mode == 'down': self.scale_factor = 0.5 elif mode == 'up': self.scale_factor = 2 def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.skip.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
repo_name: PrimeshShamilka/GFPGAN
module_name: ResBlock
synthetic: false
uuid: 9,424
licenses: ["BSD-3-Clause"]
stars: 0
sha: 3ba48b932d41a4faa906e5cd39794b60845db708
repo_link: https://github.com/PrimeshShamilka/GFPGAN/tree/3ba48b932d41a4faa906e5cd39794b60845db708
entry_point: SEBlock
import torch from torch import nn import torch.nn.functional as F class HardSigmoid(nn.Module): def __init__(self, slope=0.2, offset=0.5): super().__init__() self.slope = slope self.offset = offset def forward(self, x): x = self.slope * x + self.offset x = F.threshold(-x, -1, -1) x = F.threshold(-x, 0, 0) return x class SEBlock(nn.Module): def __init__(self, in_channels, out_channels, ratio=4): super().__init__() num_mid_filter = out_channels // ratio self.pool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= num_mid_filter, kernel_size=1, bias=True) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(in_channels=num_mid_filter, kernel_size=1, out_channels=out_channels, bias=True) self.relu2 = HardSigmoid() def forward(self, x): attn = self.pool(x) attn = self.conv1(attn) attn = self.relu1(attn) attn = self.conv2(attn) attn = self.relu2(attn) return x * attn def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_convolution_mul_neg_threshold_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = 0.5 tmp6 = tmp4 + tmp5 tmp7 = -tmp6 tmp8 = -1.0 tmp9 = tmp7 <= tmp8 tmp10 = tl.where(tmp9, tmp8, tmp7) tmp11 = -tmp10 tmp12 = 0.0 tmp13 = tmp11 <= tmp12 tl.store(out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_convolution_mul_neg_threshold_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp3 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp5 = tmp3 + tmp4 tmp6 = 0.2 tmp7 = tmp5 * tmp6 tmp8 = 0.5 tmp9 = tmp7 + tmp8 tmp10 = -tmp9 tmp11 = -1.0 tmp12 = tl.where(tmp2, tmp11, tmp10) tmp13 = -tmp12 tmp14 = 0.0 tmp15 = tl.where(tmp1, tmp14, tmp13) tmp16 = tmp0 * tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) triton_poi_fused_add_convolution_mul_neg_threshold_2[grid(16)](buf4, primals_5, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_convolution_mul_neg_threshold_3[grid(256)]( primals_1, buf6, buf5, buf4, primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 return buf7, primals_1, primals_2, primals_4, buf1, buf3, buf5, buf6 class HardSigmoid(nn.Module): def __init__(self, slope=0.2, offset=0.5): super().__init__() self.slope = slope self.offset = offset def forward(self, x): x = self.slope * x + self.offset x = F.threshold(-x, -1, -1) x = F.threshold(-x, 0, 0) return x class SEBlockNew(nn.Module): def __init__(self, in_channels, out_channels, ratio=4): super().__init__() num_mid_filter = out_channels // ratio self.pool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= num_mid_filter, kernel_size=1, bias=True) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(in_channels=num_mid_filter, kernel_size=1, out_channels=out_channels, bias=True) self.relu2 = HardSigmoid() def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
repo_name: LDOUBLEV/DBNet.pytorch
module_name: SEBlock
synthetic: false
uuid: 9,425
licenses: ["Apache-2.0"]
stars: 0
sha: 206f4a1e5cc3686284476f029a26fc69f610e898
repo_link: https://github.com/LDOUBLEV/DBNet.pytorch/tree/206f4a1e5cc3686284476f029a26fc69f610e898
entry_point: QNetwork
import torch import torch.nn.functional as F import torch.nn as nn class QNetwork(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=1024, fc2_units=512): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(QNetwork, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) def forward(self, state): """Build a network that maps state -> action values.""" x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (1024, 4), (4, 1)) assert_size_stride(primals_2, (1024,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 1024), (1024, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (4, 512), (512, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1024), (16384, 4096, 1024, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf1, primals_2, buf6, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0 ), reinterpret_tensor(primals_4, (1024, 512), (1, 1024), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(32768)](buf3, primals_5, buf5, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), 
reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0 ), reinterpret_tensor(buf3, (64, 512), (512, 1), 0 ), primals_6, buf5, primals_4, buf6 class QNetworkNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=1024, fc2_units=512): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(QNetworkNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
repo_name: SagarRathod-TomTom/Navigation-Deep-Reinforcement-Learning-Nanodegree
module_name: QNetwork
synthetic: false
uuid: 9,426
licenses: ["MIT"]
stars: 0
sha: a13597d5077785bd486d8ce528dc177685226b1c
repo_link: https://github.com/SagarRathod-TomTom/Navigation-Deep-Reinforcement-Learning-Nanodegree/tree/a13597d5077785bd486d8ce528dc177685226b1c
entry_point: SkipLastTargetChannelWrapper
import torch from torch import nn as nn from torch.nn import MSELoss class SkipLastTargetChannelWrapper(nn.Module): """ Loss wrapper which removes additional target channel """ def __init__(self, loss, squeeze_channel=False): super(SkipLastTargetChannelWrapper, self).__init__() self.loss = loss self.squeeze_channel = squeeze_channel def forward(self, input, target): assert target.size(1 ) > 1, 'Target tensor has a singleton channel dimension, cannot remove channel' target = target[:, :-1, ...] if self.squeeze_channel: target = torch.squeeze(target, dim=1) return self.loss(input, target) def get_inputs(): return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'loss': MSELoss()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex r0 = rindex % 48 r1 = rindex // 48 tmp0 = tl.load(in_ptr0 + r2, rmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = 192.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 3, 4, 4), (48, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SkipLastTargetChannelWrapperNew(nn.Module): """ Loss wrapper which removes additional target channel """ def __init__(self, loss, squeeze_channel=False): super(SkipLastTargetChannelWrapperNew, self).__init__() self.loss = loss self.squeeze_channel = squeeze_channel def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: PerceptionComputingLab/PARSE2022
module_name: SkipLastTargetChannelWrapper
synthetic: false
uuid: 9,427
licenses: ["Apache-2.0"]
stars: 0
sha: a34886ed9d06b424bc93953f1b2f79540ad9ebf6
repo_link: https://github.com/PerceptionComputingLab/PARSE2022/tree/a34886ed9d06b424bc93953f1b2f79540ad9ebf6
entry_point: ToRGB
from torch.autograd import Function import math import torch import torch.nn as nn import torch.nn.functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_2[grid(48)](primals_5, buf2, buf3, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, 
down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( 
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
AsianZeus/Diverse-Facial-Edit
ToRGB
false
9,428
[ "Apache-2.0" ]
0
3d4b1b41546a08a1fa3cb164ade33e319806b12b
https://github.com/AsianZeus/Diverse-Facial-Edit/tree/3d4b1b41546a08a1fa3cb164ade33e319806b12b
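The ToRGB record above relies on ModulatedConv2d folding the batch dimension into convolution groups, so every sample is convolved with its own style-modulated weight in a single F.conv2d call. The following is a minimal, self-contained sketch of that equivalence; it is illustrative only and not part of the dataset record, with shapes chosen to mirror get_inputs()/get_init_inputs().

import torch
import torch.nn.functional as F

# batch 4, 4 input channels, 3 RGB output channels, 1x1 kernel
batch, in_ch, out_ch, k, h, w = 4, 4, 3, 1, 4, 4
x = torch.rand(batch, in_ch, h, w)
weight = torch.rand(batch, out_ch, in_ch, k, k)   # one modulated weight per sample

# grouped-conv formulation used by ModulatedConv2d: fold batch into channels
out_grouped = F.conv2d(x.view(1, batch * in_ch, h, w),
                       weight.view(batch * out_ch, in_ch, k, k),
                       groups=batch).view(batch, out_ch, h, w)

# reference: convolve each sample with its own weight
out_loop = torch.cat([F.conv2d(x[i:i + 1], weight[i]) for i in range(batch)])

assert torch.allclose(out_grouped, out_loop, atol=1e-6)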
BCEDiceLoss
import torch from torch import nn as nn def flatten(tensor): """Flattens a given tensor such that the channel axis is first. The shapes are transformed as follows: (N, C, D, H, W) -> (C, N * D * H * W) """ C = tensor.size(1) axis_order = (1, 0) + tuple(range(2, tensor.dim())) transposed = tensor.permute(axis_order) return transposed.contiguous().view(C, -1) def compute_per_channel_dice(input, target, epsilon=1e-06, weight=None): """ Computes DiceCoefficient as defined in https://arxiv.org/abs/1606.04797 given a multi channel input and target. Assumes the input is a normalized probability, e.g. a result of Sigmoid or Softmax function. Args: input (torch.Tensor): NxCxSpatial input tensor target (torch.Tensor): NxCxSpatial target tensor epsilon (float): prevents division by zero weight (torch.Tensor): Cx1 tensor of weight per channel/class """ assert input.size() == target.size( ), "'input' and 'target' must have the same shape" input = flatten(input) target = flatten(target) target = target.float() intersect = (input * target).sum(-1) if weight is not None: intersect = weight * intersect denominator = (input * input).sum(-1) + (target * target).sum(-1) return 2 * (intersect / denominator.clamp(min=epsilon)) class _AbstractDiceLoss(nn.Module): """ Base class for different implementations of Dice loss. """ def __init__(self, weight=None, normalization='sigmoid'): super(_AbstractDiceLoss, self).__init__() self.register_buffer('weight', weight) assert normalization in ['sigmoid', 'softmax', 'none'] if normalization == 'sigmoid': self.normalization = nn.Sigmoid() elif normalization == 'softmax': self.normalization = nn.Softmax(dim=1) else: self.normalization = lambda x: x def dice(self, input, target, weight): raise NotImplementedError def forward(self, input, target): input = self.normalization(input) per_channel_dice = self.dice(input, target, weight=self.weight) return 1.0 - torch.mean(per_channel_dice) class DiceLoss(_AbstractDiceLoss): """Computes Dice Loss according to https://arxiv.org/abs/1606.04797. For multi-class segmentation `weight` parameter can be used to assign different weights per class. The input to the loss function is assumed to be a logit and will be normalized by the Sigmoid function. """ def __init__(self, weight=None, normalization='sigmoid'): super().__init__(weight, normalization) def dice(self, input, target, weight): return compute_per_channel_dice(input, target, weight=self.weight) class BCEDiceLoss(nn.Module): """Linear combination of BCE and Dice losses""" def __init__(self, alpha, beta): super(BCEDiceLoss, self).__init__() self.alpha = alpha self.bce = nn.BCEWithLogitsLoss() self.beta = beta self.dice = DiceLoss() def forward(self, input, target): return self.alpha * self.bce(input, target) + self.beta * self.dice( input, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'alpha': 4, 'beta': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tmp1 * tmp1 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tmp2 * tmp2 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp12, xmask) tl.store(out_ptr2 + x0, tmp17, xmask) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tl.load(in_ptr2 + r0, None) tmp12 = tl.load(in_out_ptr0 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, 1]) tmp3 = tmp1 + tmp2 tmp4 = 1e-06 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp0 / tmp5 tmp7 = 2.0 tmp8 = tmp6 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 4.0 tmp17 = tmp15 * tmp16 tmp18 = tmp11 / tmp16 tmp19 = 1.0 tmp20 = tmp19 - tmp18 tmp21 = tmp20 * tmp16 tmp22 = tmp17 + tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf1, buf2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf5 = buf0 del buf0 triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2[ grid(1)](buf5, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf2 del buf3 return buf5, def flatten(tensor): """Flattens a given tensor such that the channel axis is first. The shapes are transformed as follows: (N, C, D, H, W) -> (C, N * D * H * W) """ C = tensor.size(1) axis_order = (1, 0) + tuple(range(2, tensor.dim())) transposed = tensor.permute(axis_order) return transposed.contiguous().view(C, -1) def compute_per_channel_dice(input, target, epsilon=1e-06, weight=None): """ Computes DiceCoefficient as defined in https://arxiv.org/abs/1606.04797 given a multi channel input and target. Assumes the input is a normalized probability, e.g. a result of Sigmoid or Softmax function. Args: input (torch.Tensor): NxCxSpatial input tensor target (torch.Tensor): NxCxSpatial target tensor epsilon (float): prevents division by zero weight (torch.Tensor): Cx1 tensor of weight per channel/class """ assert input.size() == target.size( ), "'input' and 'target' must have the same shape" input = flatten(input) target = flatten(target) target = target.float() intersect = (input * target).sum(-1) if weight is not None: intersect = weight * intersect denominator = (input * input).sum(-1) + (target * target).sum(-1) return 2 * (intersect / denominator.clamp(min=epsilon)) class _AbstractDiceLoss(nn.Module): """ Base class for different implementations of Dice loss. 
""" def __init__(self, weight=None, normalization='sigmoid'): super(_AbstractDiceLoss, self).__init__() self.register_buffer('weight', weight) assert normalization in ['sigmoid', 'softmax', 'none'] if normalization == 'sigmoid': self.normalization = nn.Sigmoid() elif normalization == 'softmax': self.normalization = nn.Softmax(dim=1) else: self.normalization = lambda x: x def dice(self, input, target, weight): raise NotImplementedError def forward(self, input, target): input = self.normalization(input) per_channel_dice = self.dice(input, target, weight=self.weight) return 1.0 - torch.mean(per_channel_dice) class DiceLoss(_AbstractDiceLoss): """Computes Dice Loss according to https://arxiv.org/abs/1606.04797. For multi-class segmentation `weight` parameter can be used to assign different weights per class. The input to the loss function is assumed to be a logit and will be normalized by the Sigmoid function. """ def __init__(self, weight=None, normalization='sigmoid'): super().__init__(weight, normalization) def dice(self, input, target, weight): return compute_per_channel_dice(input, target, weight=self.weight) class BCEDiceLossNew(nn.Module): """Linear combination of BCE and Dice losses""" def __init__(self, alpha, beta): super(BCEDiceLossNew, self).__init__() self.alpha = alpha self.bce = nn.BCEWithLogitsLoss() self.beta = beta self.dice = DiceLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PerceptionComputingLab/PARSE2022
BCEDiceLoss
false
9,430
[ "Apache-2.0" ]
0
a34886ed9d06b424bc93953f1b2f79540ad9ebf6
https://github.com/PerceptionComputingLab/PARSE2022/tree/a34886ed9d06b424bc93953f1b2f79540ad9ebf6
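In the BCEDiceLoss record above, compute_per_channel_dice flattens both tensors channel-first and evaluates 2*(p·t) / (|p|^2 + |t|^2) per channel. A tiny worked sketch of that formula, assuming nothing beyond plain PyTorch (illustrative only, not part of the record):

import torch

pred = torch.tensor([[[[0.8, 0.2]], [[0.1, 0.9]]]])     # (N=1, C=2, H=1, W=2)
target = torch.tensor([[[[1.0, 0.0]], [[0.0, 1.0]]]])

# channel-first flatten, equivalent to flatten() above: (C, N*H*W)
p = pred.transpose(0, 1).reshape(2, -1)
t = target.transpose(0, 1).reshape(2, -1)
dice = 2 * (p * t).sum(-1) / ((p * p).sum(-1) + (t * t).sum(-1)).clamp(min=1e-6)
print(dice)   # tensor([0.9524, 0.9890]) -- per-channel Dice, each in (0, 1]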
ActFirstResBlock
import torch from torch import nn import torch.nn.functional as F class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class Conv2dBlock(nn.Module): def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none', activation='relu', pad_type='zero', use_bias=True, activation_first =False): super(Conv2dBlock, self).__init__() self.use_bias = use_bias self.activation_first = activation_first if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = out_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=False) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=False) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias) def forward(self, x): if self.activation_first: if self.activation: x = self.activation(x) x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) else: x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class ActFirstResBlock(nn.Module): def __init__(self, fin, fout, fhid=None, activation='lrelu', norm='none'): super().__init__() self.learned_shortcut = fin != fout self.fin = fin self.fout = fout self.fhid = min(fin, fout) if fhid is None else fhid self.conv_0 = Conv2dBlock(self.fin, self.fhid, 3, 1, padding=1, pad_type='reflect', norm=norm, activation=activation, activation_first=True) self.conv_1 = Conv2dBlock(self.fhid, self.fout, 3, 1, padding=1, pad_type='reflect', norm=norm, activation=activation, activation_first=True) if self.learned_shortcut: self.conv_s = Conv2dBlock(self.fin, self.fout, 1, 1, activation ='none', use_bias=False) def forward(self, x): x_s = self.conv_s(x) if self.learned_shortcut else x dx = self.conv_0(x) dx = self.conv_1(dx) out = x_s + dx return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'fin': 4, 'fout': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.2 tmp5 = tmp3 * tmp4 tmp6 = tl.where(tmp0, tmp3, tmp5) tl.store(out_ptr0 + x5, tmp6, xmask) @triton.jit def triton_poi_fused_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) buf1 = 
extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2[grid(576)]( buf2, buf1, primals_3, buf3, 576, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_add_convolution_3[grid(256)](buf5, primals_1, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_5 return buf5, primals_2, primals_4, buf0, buf2, buf3 class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. 
weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class Conv2dBlock(nn.Module): def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none', activation='relu', pad_type='zero', use_bias=True, activation_first =False): super(Conv2dBlock, self).__init__() self.use_bias = use_bias self.activation_first = activation_first if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = out_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=False) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=False) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias) def forward(self, x): if self.activation_first: if self.activation: x = self.activation(x) x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) else: x = self.conv(self.pad(x)) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class ActFirstResBlockNew(nn.Module): def __init__(self, fin, fout, fhid=None, activation='lrelu', norm='none'): super().__init__() self.learned_shortcut = fin != fout self.fin = fin self.fout = fout self.fhid = min(fin, fout) if fhid is None else fhid self.conv_0 = Conv2dBlock(self.fin, self.fhid, 3, 1, padding=1, pad_type='reflect', norm=norm, activation=activation, activation_first=True) self.conv_1 = Conv2dBlock(self.fhid, self.fout, 3, 1, padding=1, pad_type='reflect', norm=norm, activation=activation, activation_first=True) if self.learned_shortcut: self.conv_s = Conv2dBlock(self.fin, self.fout, 1, 1, activation ='none', use_bias=False) def forward(self, input_0): primals_2 = self.conv_0.conv.weight primals_3 = self.conv_0.conv.bias primals_4 = self.conv_1.conv.weight primals_5 = self.conv_1.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
PredatorK9/GANwriting
ActFirstResBlock
false
9,431
[ "MIT" ]
0
246d7e87152c98f0c6af999d619dc51190fad8ae
https://github.com/PredatorK9/GANwriting/tree/246d7e87152c98f0c6af999d619dc51190fad8ae
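ActFirstResBlock above is a pre-activation residual unit: each Conv2dBlock applies LeakyReLU before reflection padding and convolution, and with fin == fout the shortcut is the identity. A quick sanity sketch, assuming the ActFirstResBlock class from the record is importable in the current scope: zeroing both convolutions should reduce the block to the identity.

import torch

block = ActFirstResBlock(fin=4, fout=4)          # fin == fout -> identity shortcut
with torch.no_grad():
    for conv in (block.conv_0.conv, block.conv_1.conv):
        conv.weight.zero_()
        conv.bias.zero_()

x = torch.rand(4, 4, 4, 4)
assert torch.allclose(block(x), x)               # residual branch contributes nothing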
GatedFusion
import torch import torch.nn as nn import torch.utils.data import torch.multiprocessing import torch.nn.modules.loss from scipy.sparse import * class GatedFusion(nn.Module): def __init__(self, hidden_size): super(GatedFusion, self).__init__() """GatedFusion module""" self.fc_z = nn.Linear(4 * hidden_size, hidden_size, bias=True) def forward(self, h_state, input): z = torch.sigmoid(self.fc_z(torch.cat([h_state, input, h_state * input, h_state - input], -1))) h_state = (1 - z) * h_state + z * input return h_state def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data import torch.multiprocessing import torch.nn.modules.loss from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tmp3 * tmp4 tmp7 = tmp1 * tmp6 tmp8 = tmp5 + tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 16), (16, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1024)](primals_1, primals_2, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_1[grid(256)](buf1, primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_1, primals_2, reinterpret_tensor(buf0, (64, 16), ( 16, 1), 0), buf1 class GatedFusionNew(nn.Module): def __init__(self, hidden_size): super(GatedFusionNew, self).__init__() """GatedFusion module""" self.fc_z = nn.Linear(4 * hidden_size, hidden_size, bias=True) def forward(self, input_0, input_1): primals_3 = self.fc_z.weight primals_4 = self.fc_z.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
LucasAPayne/graph4nlp
GatedFusion
false
9,432
[ "Apache-2.0" ]
0
3b72308f6ed9ce04c535f78b4b21b6ae0a8f5421
https://github.com/LucasAPayne/graph4nlp/tree/3b72308f6ed9ce04c535f78b4b21b6ae0a8f5421
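GatedFusion above computes a sigmoid gate over the concatenation [h, x, h*x, h-x] and blends the two inputs as (1 - z)*h + z*x. A short sketch that reproduces the forward pass by hand, assuming the GatedFusion class from the record is in scope (illustrative only):

import torch

fusion = GatedFusion(hidden_size=4)
h, x = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)

z = torch.sigmoid(fusion.fc_z(torch.cat([h, x, h * x, h - x], dim=-1)))
manual = (1 - z) * h + z * x                     # convex blend controlled by the gate
assert torch.allclose(fusion(h, x), manual)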
Net
import torch import torch.fft import torch.nn.functional as torchf class Net(torch.nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = torch.nn.Conv2d(2, 4, 3, padding=1) self.conv2 = torch.nn.Conv2d(4, 4, 3, padding=1) self.conv3 = torch.nn.Conv2d(4, 2, 3, padding=1) def forward(self, x): x = torchf.relu(self.conv1(x)) x = torchf.relu(self.conv2(x)) x = self.conv3(x) return x def get_inputs(): return [torch.rand([4, 2, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 2 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 2, 64, 64), (8192, 4096, 64, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(65536)](buf1, primals_2, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(65536)](buf3, primals_5, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 2, 64, 64), (8192, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_1[grid(32768)](buf5, primals_7, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3 class NetNew(torch.nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = torch.nn.Conv2d(2, 4, 3, padding=1) self.conv2 = torch.nn.Conv2d(4, 4, 3, padding=1) self.conv3 = torch.nn.Conv2d(4, 2, 3, padding=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7]) return output[0]
Sh0cktr4p/PhiFlow
Net
false
9,433
[ "MIT" ]
0
cc87c5887bc3abfa1ef3c03252122a06e9fd2c18
https://github.com/Sh0cktr4p/PhiFlow/tree/cc87c5887bc3abfa1ef3c03252122a06e9fd2c18
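Each record pairs the reference module with an Inductor-generated *New module whose call() runs Triton kernels on CUDA. Below is a hedged sketch of how the two could be compared for the simple Net above; the file names net_ref.py and net_triton.py are assumptions (the dataset does not prescribe where the two code fields are saved), and a CUDA device is required because the generated call() hard-codes torch.cuda.

import torch
from net_ref import Net          # assumption: original code field saved as net_ref.py
from net_triton import NetNew    # assumption: optimised code field saved as net_triton.py

if torch.cuda.is_available():
    ref, opt = Net().cuda(), NetNew().cuda()
    opt.load_state_dict(ref.state_dict())        # identical parameter names and shapes
    x = torch.rand(4, 2, 64, 64, device='cuda')
    with torch.no_grad():
        assert torch.allclose(ref(x), opt(x), atol=1e-5)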
PositionwiseFeedForward
import torch import torch.nn as nn import torch.nn.functional as F class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, x): residual = x x = self.w_2(F.relu(self.w_1(x))) x = self.dropout(x) x += residual x = self.layer_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2, primals_1, buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del buf4 del primals_7 return buf5, primals_1, primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, buf6 class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
PINE4PPLE/transformer-lm
PositionwiseFeedForward
false
9,434
[ "MIT" ]
0
da76a4afd29d1fd023ba866ccc21a49901ad46f2
https://github.com/PINE4PPLE/transformer-lm/tree/da76a4afd29d1fd023ba866ccc21a49901ad46f2
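With dropout disabled, PositionwiseFeedForward above is exactly LayerNorm(x + w_2(relu(w_1(x)))). A small check of that identity in eval mode, assuming the class from the record is in scope (illustrative only):

import torch
import torch.nn.functional as F

ffn = PositionwiseFeedForward(d_in=4, d_hid=4).eval()   # eval() -> dropout is a no-op
x = torch.rand(4, 4, 4, 4)
manual = ffn.layer_norm(x + ffn.w_2(F.relu(ffn.w_1(x))))
assert torch.allclose(ffn(x), manual, atol=1e-6)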
SeparableBlock
from torch.nn import Module import torch from torch.nn import Linear class SeparableBlock(Module): def __init__(self, input_size, kernel_channels_in, kernel_channels_out, kernel_size): super(SeparableBlock, self).__init__() self.input_size = input_size self.kernel_size = kernel_size self.kernel_channels_in = kernel_channels_in self.kernel_channels_out = kernel_channels_out self.make_kernel_in = Linear(input_size, kernel_size * kernel_size * kernel_channels_in) self.make_kernel_out = Linear(input_size, kernel_size * kernel_size * kernel_channels_out) self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in) self.kernel_linear_out = Linear(kernel_channels_out, kernel_channels_out) def forward(self, features): features = features.view(-1, self.input_size) kernel_in = self.make_kernel_in(features).view(-1, self.kernel_size, self.kernel_size, 1, self.kernel_channels_in) kernel_out = self.make_kernel_out(features).view(-1, self. kernel_size, self.kernel_size, self.kernel_channels_out, 1) kernel = torch.matmul(kernel_out, kernel_in) kernel = self.kernel_linear_in(kernel).permute(0, 1, 2, 4, 3) kernel = self.kernel_linear_out(kernel) kernel = kernel.permute(0, 4, 3, 1, 2) return kernel def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'kernel_channels_in': 4, 'kernel_channels_out': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 4), (4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 64), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((1024, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (1024, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf0, (1024, 1, 4), (4, 4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4096, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (4096, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((64, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4096, 4)](buf3, primals_7, buf4, 4096, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_7 buf5 = buf3 del buf3 extern_kernels.mm(reinterpret_tensor(buf4, (4096, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (64, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_1[grid(16384)](buf6, primals_9, 
16384, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 return reinterpret_tensor(buf6, (64, 4, 4, 4, 4), (256, 1, 4, 64, 16), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (4096, 4), (4, 1), 0), reinterpret_tensor( buf4, (4096, 4), (4, 1), 0), primals_8, primals_6, reinterpret_tensor( buf1, (1024, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (1024, 4, 1), (4, 1, 4), 0) class SeparableBlockNew(Module): def __init__(self, input_size, kernel_channels_in, kernel_channels_out, kernel_size): super(SeparableBlockNew, self).__init__() self.input_size = input_size self.kernel_size = kernel_size self.kernel_channels_in = kernel_channels_in self.kernel_channels_out = kernel_channels_out self.make_kernel_in = Linear(input_size, kernel_size * kernel_size * kernel_channels_in) self.make_kernel_out = Linear(input_size, kernel_size * kernel_size * kernel_channels_out) self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in) self.kernel_linear_out = Linear(kernel_channels_out, kernel_channels_out) def forward(self, input_0): primals_2 = self.make_kernel_in.weight primals_3 = self.make_kernel_in.bias primals_4 = self.make_kernel_out.weight primals_5 = self.make_kernel_out.bias primals_6 = self.kernel_linear_in.weight primals_7 = self.kernel_linear_in.bias primals_8 = self.kernel_linear_out.weight primals_9 = self.kernel_linear_out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
RoyNijhuis/FaceFormer
SeparableBlock
false
9,435
[ "Apache-2.0", "BSD-2-Clause", "MIT" ]
0
197d6598b705b988a4ad275c2333bcde6a5eaf9f
https://github.com/RoyNijhuis/FaceFormer/tree/197d6598b705b988a4ad275c2333bcde6a5eaf9f
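SeparableBlock above predicts two factors per spatial position and builds the kernel as the outer product kernel_out @ kernel_in, so each (C_out, C_in) slice is rank one before the two Linear layers are applied. A sketch that checks the output shape and that outer-product structure, assuming the SeparableBlock class from the record is in scope (illustrative only):

import torch

block = SeparableBlock(input_size=4, kernel_channels_in=4,
                       kernel_channels_out=4, kernel_size=4)
features = torch.rand(4, 4, 4, 4)
kernel = block(features)
print(kernel.shape)              # torch.Size([64, 4, 4, 4, 4]): (N, C_out, C_in, k, k)

flat = features.view(-1, 4)
k_in = block.make_kernel_in(flat).view(-1, 4, 4, 1, 4)
k_out = block.make_kernel_out(flat).view(-1, 4, 4, 4, 1)
outer = torch.matmul(k_out, k_in)                # (N, k, k, C_out, C_in)
assert torch.allclose(outer[0, 0, 0],
                      torch.outer(k_out[0, 0, 0, :, 0], k_in[0, 0, 0, 0, :]))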
GlobalAvgPool2d
import torch from torch import nn class GlobalAvgPool2d(nn.Module): """Performs global average pooling over the entire height and width of a batched 2D tensor # Arguments input: Input tensor """ def forward(self, input): return nn.functional.avg_pool2d(input, kernel_size=input.size()[2:] ).view(-1, input.size(1)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class GlobalAvgPool2dNew(nn.Module): """Performs global average pooling over the entire height and width of a batched 2D tensor # Arguments input: Input tensor """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Shadowalker1995/few-shot
GlobalAvgPool2d
false
9436
[ "MIT" ]
0
68026f4d5d092b9cb7cc3b50ba8d28ca1b70ade9
https://github.com/Shadowalker1995/few-shot/tree/68026f4d5d092b9cb7cc3b50ba8d28ca1b70ade9
GlobalMaxPool1d
import torch
from torch import nn


class GlobalMaxPool1d(nn.Module):
    """Performs global max pooling over the entire length of a batched 1D
    tensor

    # Arguments
        input: Input tensor
    """

    def forward(self, input):
        return nn.functional.max_pool1d(input, kernel_size=input.size()[2:]
            ).view(-1, input.size(1))


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class GlobalMaxPool1dNew(nn.Module): """Performs global max pooling over the entire length of a batched 1D tensor # Arguments input: Input tensor """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Shadowalker1995/few-shot
GlobalMaxPool1d
false
9437
[ "MIT" ]
0
68026f4d5d092b9cb7cc3b50ba8d28ca1b70ade9
https://github.com/Shadowalker1995/few-shot/tree/68026f4d5d092b9cb7cc3b50ba8d28ca1b70ade9
SpatialAttention
import torch
from torch import nn


class SpatialAttention(nn.Module):

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv1(x)
        return self.sigmoid(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = triton_helpers.maximum(tmp21, tmp22) tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp16, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp15, tmp27) tl.store(out_ptr0 + x3, tmp28, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_sigmoid_1[grid(64)](buf2, 64, XBLOCK=64, num_warps =1, num_stages=1) return buf2, primals_2, buf0, buf2 class SpatialAttentionNew(nn.Module): def __init__(self, kernel_size=7): super(SpatialAttentionNew, self).__init__() assert kernel_size in (3, 7), 'kernel size must be 3 or 7' padding = 3 if kernel_size == 7 else 1 self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False) self.sigmoid = nn.Sigmoid() 
def forward(self, input_0): primals_2 = self.conv1.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Panpan-Chen/Attention-Block-U-net
SpatialAttention
false
9438
[ "MIT" ]
0
7e0cef46ea485db1bb9a9e4511eb0535e460179e
https://github.com/Panpan-Chen/Attention-Block-U-net/tree/7e0cef46ea485db1bb9a9e4511eb0535e460179e
StdConv2d
import torch
import torch.nn as nn
import torch.nn.functional as F


class StdConv2d(nn.Conv2d):

    def forward(self, x):
        w = self.weight
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-05)
        return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
            dilation, self.groups)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_sqrt_sub_var_mean_0[grid(4)](buf3, primals_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_1[grid(16)](buf6, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf6, primals_1, primals_3, buf3, buf4 class StdConv2dNew(nn.Conv2d): def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Quallle/TransUNet
StdConv2d
false
9439
[ "Apache-2.0" ]
0
cf62a2a021e096c105b3fc62958a1eeb231e7a8f
https://github.com/Quallle/TransUNet/tree/cf62a2a021e096c105b3fc62958a1eeb231e7a8f
SelfAttention
import torch
import torch.nn as nn
import torch.utils.data
import torch.multiprocessing
import torch.nn.modules.loss
from scipy.sparse import *


class SelfAttention(nn.Module):

    def __init__(self, input_size, hidden_size):
        super(SelfAttention, self).__init__()
        self.W1 = torch.Tensor(input_size, hidden_size)
        self.W1 = nn.Parameter(nn.init.xavier_uniform_(self.W1))
        self.W2 = torch.Tensor(hidden_size, 1)
        self.W2 = nn.Parameter(nn.init.xavier_uniform_(self.W2))

    def forward(self, x, attention_mask=None):
        attention = torch.mm(torch.tanh(torch.mm(x.view(-1, x.size(-1)),
            self.W1)), self.W2).view(x.size(0), -1)
        if attention_mask is not None:
            attention = attention.masked_fill_((1 - attention_mask).bool(),
                -INF)
        probs = torch.softmax(attention, dim=-1).unsqueeze(1)
        weighted_x = torch.bmm(probs, x).squeeze(1)
        return weighted_x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data import torch.multiprocessing import torch.nn.modules.loss from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), 
(4, 0, 1), 0 ), primals_1, out=buf5) del buf4 return reinterpret_tensor(buf5, (4, 4), (4, 1), 0 ), primals_1, buf1, buf2, reinterpret_tensor(primals_3, (1, 4), (1, 1), 0) class SelfAttentionNew(nn.Module): def __init__(self, input_size, hidden_size): super(SelfAttentionNew, self).__init__() self.W1 = torch.Tensor(input_size, hidden_size) self.W1 = nn.Parameter(nn.init.xavier_uniform_(self.W1)) self.W2 = torch.Tensor(hidden_size, 1) self.W2 = nn.Parameter(nn.init.xavier_uniform_(self.W2)) def forward(self, input_0): primals_2 = self.W1 primals_3 = self.W2 primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
LucasAPayne/graph4nlp
SelfAttention
false
9440
[ "Apache-2.0" ]
0
3b72308f6ed9ce04c535f78b4b21b6ae0a8f5421
https://github.com/LucasAPayne/graph4nlp/tree/3b72308f6ed9ce04c535f78b4b21b6ae0a8f5421
NCESoftmaxLoss
import torch
from torch import nn
import torch.utils.data


class NCESoftmaxLoss(nn.Module):

    def __init__(self):
        super(NCESoftmaxLoss, self).__init__()
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, x, label):
        x.shape[0]
        x = x.squeeze()
        loss = self.criterion(x, label)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class NCESoftmaxLossNew(nn.Module): def __init__(self): super(NCESoftmaxLossNew, self).__init__() self.criterion = nn.CrossEntropyLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Shreyas-Gururaj/Point_Contrast_ME0.5.3
NCESoftmaxLoss
false
9441
[ "MIT" ]
0
72bc78001b0b4529ca96f193764dcac0c5a0ce0f
https://github.com/Shreyas-Gururaj/Point_Contrast_ME0.5.3/tree/72bc78001b0b4529ca96f193764dcac0c5a0ce0f
Downsample
import torch
import torch.nn as nn


class Downsample(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = 0, 1, 0, 1
            x = torch.nn.functional.pad(x, pad, mode='constant', value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'with_conv': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 5 x0 = xindex % 5 x2 = xindex // 25 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(400)](primals_1, buf0, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, primals_2, buf0 class DownsampleNew(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Rm1n90/SDEdit
Downsample
false
9442
[ "MIT" ]
0
16bfa4f5d37cd32680359db3405af4ea40a9cd1b
https://github.com/Rm1n90/SDEdit/tree/16bfa4f5d37cd32680359db3405af4ea40a9cd1b
Upsample
import torch
import torch.nn as nn


class Upsample(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode='nearest'
            )
        if self.with_conv:
            x = self.conv(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'with_conv': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class UpsampleNew(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Rm1n90/SDEdit
Upsample
false
9443
[ "MIT" ]
0
16bfa4f5d37cd32680359db3405af4ea40a9cd1b
https://github.com/Rm1n90/SDEdit/tree/16bfa4f5d37cd32680359db3405af4ea40a9cd1b
ThresholdedRelu
import torch
from torch import nn
import torch.onnx


class ThresholdedRelu(nn.Module):

    def __init__(self, alpha=1.0):
        self.alpha = alpha
        super().__init__()

    def forward(self, X: 'torch.Tensor'):
        Y = torch.clamp(X, min=self.alpha)
        Y[Y == self.alpha] = 0.0
        return Y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp2 == tmp1 tmp4 = 0.0 tmp5 = tl.where(tmp3, tmp4, tmp2) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_index_put_lift_fresh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class ThresholdedReluNew(nn.Module): def __init__(self, alpha=1.0): self.alpha = alpha super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Piteryo/onnx2pytorch
ThresholdedRelu
false
9444
[ "Apache-2.0" ]
0
c25b3a5289ee7073d644d280a112c15382b7f690
https://github.com/Piteryo/onnx2pytorch/tree/c25b3a5289ee7073d644d280a112c15382b7f690
Transition
import torch
import torch.nn as nn


class Transition(nn.Module):

    def __init__(self, in_features, out_features, act_layer=nn.GELU):
        super(Transition, self).__init__()
        self.act = act_layer()
        self.linear = nn.Linear(in_features, out_features)

    def forward(self, x):
        x = self.linear(x)
        x = self.act(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class TransitionNew(nn.Module): def __init__(self, in_features, out_features, act_layer=nn.GELU): super(TransitionNew, self).__init__() self.act = act_layer() self.linear = nn.Linear(in_features, out_features) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Roxbili/T2T-ViT
Transition
false
9445
[ "BSD-3-Clause-Clear" ]
0
c5442bc560ea15b421130f13e31c4b68f52c1e5a
https://github.com/Roxbili/T2T-ViT/tree/c5442bc560ea15b421130f13e31c4b68f52c1e5a
VectorQuantizer
import torch
from torch import Tensor
from torch import nn
from torch.nn import functional as F


class VectorQuantizer(nn.Module):
    """
    Reference:
    [1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py
    """

    def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta:
        'float'=0.25):
        super(VectorQuantizer, self).__init__()
        self.K = num_embeddings
        self.D = embedding_dim
        self.beta = beta
        self.embedding = nn.Embedding(self.K, self.D)
        self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K)

    def forward(self, latents: 'Tensor') ->Tensor:
        latents = latents.permute(0, 2, 3, 1).contiguous()
        latents_shape = latents.shape
        flat_latents = latents.view(-1, self.D)
        dist = torch.sum(flat_latents ** 2, dim=1, keepdim=True) + torch.sum(
            self.embedding.weight ** 2, dim=1) - 2 * torch.matmul(
            flat_latents, self.embedding.weight.t())
        encoding_inds = torch.argmin(dist, dim=1).unsqueeze(1)
        device = latents.device
        encoding_one_hot = torch.zeros(encoding_inds.size(0), self.K,
            device=device)
        encoding_one_hot.scatter_(1, encoding_inds, 1)
        quantized_latents = torch.matmul(encoding_one_hot, self.embedding.
            weight)
        quantized_latents = quantized_latents.view(latents_shape)
        commitment_loss = F.mse_loss(quantized_latents.detach(), latents)
        embedding_loss = F.mse_loss(quantized_latents, latents.detach())
        vq_loss = commitment_loss * self.beta + embedding_loss
        quantized_latents = latents + (quantized_latents - latents).detach()
        return quantized_latents.permute(0, 3, 1, 2).contiguous(), vq_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_embeddings': 4, 'embedding_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_view_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (y0 // 16) + y0 % 16), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mul_pow_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tl.store(in_out_ptr0 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_argmin_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 < tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 < tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 
tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 < tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_scatter_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_per_fused_add_clone_mse_loss_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r1 = rindex // 4 % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr1 + (r1 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 0.25 tmp10 = tmp8 * tmp9 tmp11 = tmp10 + tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) @triton.jit def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp2 = tmp1 - tmp0 tmp3 = tmp0 + tmp2 tl.store(out_ptr0 + (x2 + 16 * y3), tmp3, xmask & ymask) @triton.jit def triton_poi_fused_clone_mse_loss_mse_loss_backward_6(in_out_ptr0, in_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 0.0078125 tmp4 = tmp2 * tmp3 tl.debug_barrier() tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_view_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf1 = 
empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4 ), 0), out=buf1) buf2 = buf1 del buf1 triton_poi_fused_add_mul_pow_sub_sum_1[grid(256)](buf2, buf0, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_argmin_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused_scatter_3[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 buf5 = buf0 del buf0 extern_kernels.mm(buf4, primals_2, out=buf5) del primals_2 buf6 = empty_strided_cuda((), (), torch.float32) buf9 = buf6 del buf6 triton_per_fused_add_clone_mse_loss_mul_4[grid(1)](buf9, buf5, primals_1, 1, 256, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(16, 16)](primals_1, buf5, buf7, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_clone_mse_loss_mse_loss_backward_6[grid(64, 4)](buf8, primals_1, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 return buf7, buf9, buf8, reinterpret_tensor(buf4, (4, 64), (1, 4), 0) class VectorQuantizerNew(nn.Module): """ Reference: [1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py """ def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta: 'float'=0.25): super(VectorQuantizerNew, self).__init__() self.K = num_embeddings self.D = embedding_dim self.beta = beta self.embedding = nn.Embedding(self.K, self.D) self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K) def forward(self, input_0): primals_2 = self.embedding.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
OmeGaNo1/PyTorch-VAE
VectorQuantizer
false
9,446
[ "Apache-2.0" ]
0
e7b6aad70682b574c947947733794b4246a48838
https://github.com/OmeGaNo1/PyTorch-VAE/tree/e7b6aad70682b574c947947733794b4246a48838
LinearZeros
import torch
import torch.nn as nn


class LinearZeros(nn.Linear):

    def __init__(self, in_channels, out_channels, logscale_factor=3):
        super().__init__(in_channels, out_channels)
        self.logscale_factor = logscale_factor
        self.register_parameter('logs', nn.Parameter(torch.zeros(out_channels))
            )
        self.weight.data.zero_()
        self.bias.data.zero_()

    def forward(self, input):
        output = super().forward(input)
        return output * torch.exp(self.logs * self.logscale_factor)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = 3.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_mul_0[grid(256)](buf0, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class LinearZerosNew(nn.Linear): def __init__(self, in_channels, out_channels, logscale_factor=3): super().__init__(in_channels, out_channels) self.logscale_factor = logscale_factor self.register_parameter('logs', nn.Parameter(torch.zeros(out_channels)) ) self.weight.data.zero_() self.bias.data.zero_() def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_4 = self.logs primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
ShreyDixit/glow-pytorch
LinearZeros
false
9447
[ "MIT" ]
0
a964ba181898183c41f6ec6122a71b925ac33efa
https://github.com/ShreyDixit/glow-pytorch/tree/a964ba181898183c41f6ec6122a71b925ac33efa
PRelu
import torch
from torch import nn
import torch.onnx


class PRelu(nn.Module):

    def forward(self, X: 'torch.Tensor', slope: 'torch.Tensor'):
        return torch.clamp(X, min=0) + torch.clamp(X, max=0) * slope


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = triton_helpers.minimum(tmp0, tmp1) tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class PReluNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Piteryo/onnx2pytorch
PRelu
false
9448
[ "Apache-2.0" ]
0
c25b3a5289ee7073d644d280a112c15382b7f690
https://github.com/Piteryo/onnx2pytorch/tree/c25b3a5289ee7073d644d280a112c15382b7f690
FCTestNN
import torch
import torch.nn as nn
import torch.nn.functional as F


class FCTestNN(nn.Module):

    def __init__(self, class_size):
        super(FCTestNN, self).__init__()
        self.name = 'FCTestNN'
        self.fc1 = nn.Linear(3 * 224 * 224, 256)
        self.fc2 = nn.Linear(256, class_size)

    def forward(self, x):
        x = x.view(-1, 3 * 224 * 224)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = x.squeeze(1)
        return x


def get_inputs():
    return [torch.rand([4, 150528])]


def get_init_inputs():
    return [[], {'class_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 150528), (150528, 1)) assert_size_stride(primals_2, (256, 150528), (150528, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (4, 256), (256, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (150528, 256), (1, 150528), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, primals_1, buf1, primals_4 class FCTestNNNew(nn.Module): def __init__(self, class_size): super(FCTestNNNew, self).__init__() self.name = 'FCTestNN' self.fc1 = nn.Linear(3 * 224 * 224, 256) self.fc2 = nn.Linear(256, class_size) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
NirooshKa/APS360-Cold-Start-Problem
FCTestNN
false
9449
[ "MIT" ]
0
4c864737b4e6db992e99610a0ed8e82c957fd6cc
https://github.com/NirooshKa/APS360-Cold-Start-Problem/tree/4c864737b4e6db992e99610a0ed8e82c957fd6cc
UsedIndices
import torch
from torch import nn
import torch.onnx


class UsedIndices(nn.Module):

    def __init__(self):
        super().__init__()
        self.mp = nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2], ceil_mode
            =True, return_indices=True)

    def forward(self, x):
        y, indices = self.mp(x)
        return y - 42, indices + 42


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_max_pool2d_with_indices_sub_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x4 = xindex // 2 x3 = xindex tmp0 = 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = 1 + 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), tmp16 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 2 + 2 * x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (2 + 2 * x0 + 8 * x4), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 1 + 2 * x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), tmp30 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), tmp33 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (6 + 2 * x0 + 8 * x4), tmp36 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 2 + 2 * x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (8 + 2 * x0 + 8 * x4), tmp43 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (9 + 2 * x0 + 8 * x4), tmp46 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (10 + 2 * x0 + 8 * x4), tmp49 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = 
tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tmp77 = 42.0 tmp78 = tmp51 - tmp77 tmp79 = tl.full([1], 3, tl.int32) tmp80 = tl.where((tmp76 < 0) != (tmp79 < 0), tl.where(tmp76 % tmp79 != 0, tmp76 // tmp79 - 1, tmp76 // tmp79), tmp76 // tmp79) tmp81 = tmp80 * tmp79 tmp82 = tmp76 - tmp81 tmp83 = tmp0 + tmp80 tmp84 = tmp6 + tmp82 tmp85 = tmp83 * tmp3 tmp86 = tmp85 + tmp84 tmp87 = tl.full([1], 42, tl.int64) tmp88 = tmp86 + tmp87 tl.store(in_out_ptr0 + x3, tmp78, xmask) tl.store(out_ptr1 + x3, tmp88, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf0 del buf0 buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int64) get_raw_stream(0) triton_poi_fused_add_max_pool2d_with_indices_sub_0[grid(64)](buf2, arg0_1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf2, buf3 class UsedIndicesNew(nn.Module): def __init__(self): super().__init__() self.mp = nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2], ceil_mode =True, return_indices=True) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
Piteryo/onnx2pytorch
UsedIndices
false
9450
[ "Apache-2.0" ]
0
c25b3a5289ee7073d644d280a112c15382b7f690
https://github.com/Piteryo/onnx2pytorch/tree/c25b3a5289ee7073d644d280a112c15382b7f690
Classification
import torch
import torch.nn as nn


class Classification(nn.Module):
    """一个最简单的一层分类模型
    Parameters:
        input_size:输入维度
        num_classes:类别数量
    return:
        logists:最大概率对应的标签
    """

    def __init__(self, input_size, num_classes):
        super(Classification, self).__init__()
        self.fc1 = nn.Linear(input_size, num_classes)
        self.init_params()

    def init_params(self):
        for param in self.parameters():
            if len(param.size()) == 2:
                nn.init.xavier_uniform_(param)

    def forward(self, x):
        logists = torch.log_softmax(self.fc1(x), 1)
        return logists


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class ClassificationNew(nn.Module): """一个最简单的一层分类模型 Parameters: input_size:输入维度 num_classes:类别数量 return: logists:最大概率对应的标签 """ def __init__(self, input_size, num_classes): super(ClassificationNew, self).__init__() self.fc1 = nn.Linear(input_size, num_classes) self.init_params() def init_params(self): for param in 
self.parameters(): if len(param.size()) == 2: nn.init.xavier_uniform_(param) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
OuYangg/GNNs
Classification
false
9,451
[ "Apache-2.0" ]
0
ef5b1944490507684d603de3ae0b2aa7b5168f47
https://github.com/OuYangg/GNNs/tree/ef5b1944490507684d603de3ae0b2aa7b5168f47
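A note on the Classification record above: the two fused kernels in the optimised code implement the numerically stable two-pass decomposition of torch.log_softmax along dim=1 (first subtract the per-slice max, then subtract the log-sum-exp of the shifted values). The snippet below is an illustrative restatement of that decomposition in plain PyTorch, not part of the generated code; the helper name log_softmax_two_pass is hypothetical.

import torch

def log_softmax_two_pass(x, dim=1):
    # Pass 1 (mirrors triton_poi_fused__log_softmax_0): subtract the max over `dim`.
    shifted = x - x.max(dim=dim, keepdim=True).values
    # Pass 2 (mirrors triton_poi_fused__log_softmax_1): subtract log-sum-exp of the shifted values.
    return shifted - shifted.exp().sum(dim=dim, keepdim=True).log()

x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(log_softmax_two_pass(x), torch.log_softmax(x, 1))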
SigmoidFocalClassificationLoss
import torch
import torch.nn as nn


def _sigmoid_cross_entropy_with_logits(logits, labels):
    loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits)
    loss += torch.log1p(torch.exp(-torch.abs(logits)))
    return loss


class SigmoidFocalClassificationLoss(nn.Module):
    """Sigmoid focal cross entropy loss.

    Focal loss down-weights well classified examples and focusses on the hard
    examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
    """

    def __init__(self, gamma=2.0, alpha=0.25):
        """Constructor.

        Args:
            gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
            alpha: optional alpha weighting factor to balance positives vs negatives.
            all_zero_negative: bool. if True, will treat all zero as background.
                else, will treat first label as background. only affect alpha.
        """
        super().__init__()
        self._alpha = alpha
        self._gamma = gamma

    def forward(self, prediction_tensor, target_tensor, weights):
        """Compute loss function.

        Args:
            prediction_tensor: A float tensor of shape [batch_size, num_anchors,
                num_classes] representing the predicted logits for each class
            target_tensor: A float tensor of shape [batch_size, num_anchors,
                num_classes] representing one-hot encoded classification targets
            weights: a float tensor of shape [batch_size, num_anchors]
            class_indices: (Optional) A 1-D integer tensor of class indices.
                If provided, computes loss only for the specified class indices.

        Returns:
            loss: a float tensor of shape [batch_size, num_anchors, num_classes]
                representing the value of the loss function.
        """
        per_entry_cross_ent = _sigmoid_cross_entropy_with_logits(labels=
            target_tensor, logits=prediction_tensor)
        prediction_probabilities = torch.sigmoid(prediction_tensor)
        p_t = target_tensor * prediction_probabilities + (1 - target_tensor
            ) * (1 - prediction_probabilities)
        modulating_factor = 1.0
        if self._gamma:
            modulating_factor = torch.pow(1.0 - p_t, self._gamma)
        alpha_weight_factor = 1.0
        if self._alpha is not None:
            alpha_weight_factor = target_tensor * self._alpha + (1 -
                target_tensor) * (1 - self._alpha)
        focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
            per_entry_cross_ent)
        return focal_cross_entropy_loss * weights


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0( in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp27 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp0 tmp6 = tmp4 - tmp2 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp9 = tmp4 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = 0.25 tmp12 = tmp0 * tmp11 tmp13 = 0.75 tmp14 = tmp5 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp10 * tmp15 tmp17 = 0.0 tmp18 = triton_helpers.maximum(tmp1, tmp17) tmp19 = tmp1 * tmp0 tmp20 = tmp18 - tmp19 tmp21 = tl_math.abs(tmp1) tmp22 = -tmp21 tmp23 = tl_math.exp(tmp22) tmp24 = libdevice.log1p(tmp23) tmp25 = tmp20 + tmp24 tmp26 = tmp16 * tmp25 tmp28 = tmp26 * tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0[ grid(256)](arg1_1, arg0_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, def _sigmoid_cross_entropy_with_logits(logits, labels): loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits) loss += torch.log1p(torch.exp(-torch.abs(logits))) return loss class SigmoidFocalClassificationLossNew(nn.Module): """Sigmoid focal cross entropy loss. Focal loss down-weights well classified examples and focusses on the hard examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition. """ def __init__(self, gamma=2.0, alpha=0.25): """Constructor. Args: gamma: exponent of the modulating factor (1 - p_t) ^ gamma. alpha: optional alpha weighting factor to balance positives vs negatives. all_zero_negative: bool. if True, will treat all zero as background. else, will treat first label as background. only affect alpha. """ super().__init__() self._alpha = alpha self._gamma = gamma def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ShashwatNigam99/PointRCNN
SigmoidFocalClassificationLoss
false
9,452
[ "MIT" ]
0
eee5f90fe4215cff0156e1f8cecf485e18dce1f8
https://github.com/ShashwatNigam99/PointRCNN/tree/eee5f90fe4215cff0156e1f8cecf485e18dce1f8
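An easy way to exercise a record like the SigmoidFocalClassificationLoss pair above is to run the eager module and the Inductor-generated module on the record's sample inputs and compare. The harness below is a hypothetical sketch, not part of the dataset: it assumes both class definitions and get_inputs from this record are defined in the same session and that a CUDA device is available (the generated call path only accepts CUDA tensors).

import torch

eager = SigmoidFocalClassificationLoss()
compiled = SigmoidFocalClassificationLossNew()
preds, targets, weights = [t.cuda() for t in get_inputs()]
# The loss module holds no parameters, so the two variants should agree numerically.
torch.testing.assert_close(eager(preds, targets, weights),
                           compiled(preds, targets, weights))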
MINCNet
import torch
import torch.nn as nn
import torch.utils.data


class MINCNet(nn.Module):

    def __init__(self):
        super(MINCNet, self).__init__()
        self.ReLU = nn.ReLU(True)
        self.conv11 = nn.Conv2d(3, 64, 3, 1, 1)
        self.conv12 = nn.Conv2d(64, 64, 3, 1, 1)
        self.maxpool1 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv21 = nn.Conv2d(64, 128, 3, 1, 1)
        self.conv22 = nn.Conv2d(128, 128, 3, 1, 1)
        self.maxpool2 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv31 = nn.Conv2d(128, 256, 3, 1, 1)
        self.conv32 = nn.Conv2d(256, 256, 3, 1, 1)
        self.conv33 = nn.Conv2d(256, 256, 3, 1, 1)
        self.maxpool3 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv41 = nn.Conv2d(256, 512, 3, 1, 1)
        self.conv42 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv43 = nn.Conv2d(512, 512, 3, 1, 1)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv51 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv52 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv53 = nn.Conv2d(512, 512, 3, 1, 1)

    def forward(self, x):
        out = self.ReLU(self.conv11(x))
        out = self.ReLU(self.conv12(out))
        out = self.maxpool1(out)
        out = self.ReLU(self.conv21(out))
        out = self.ReLU(self.conv22(out))
        out = self.maxpool2(out)
        out = self.ReLU(self.conv31(out))
        out = self.ReLU(self.conv32(out))
        out = self.ReLU(self.conv33(out))
        out = self.maxpool3(out)
        out = self.ReLU(self.conv41(out))
        out = self.ReLU(self.conv42(out))
        out = self.ReLU(self.conv43(out))
        out = self.maxpool4(out)
        out = self.ReLU(self.conv51(out))
        out = self.ReLU(self.conv52(out))
        out = self.conv53(out)
        return out


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) 
tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_16(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 512 x1 = xindex // 512 % 4 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 1024 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4608 + x0 + 1024 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_18(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 8192 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), 
(2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, 
num_warps=4, num_stages=1) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_9[grid(1048576)](buf17, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17, buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_11[grid(524288)](buf21, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_11[grid(524288)](buf23, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23, buf24, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_13[grid(262144)](buf27, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_13[grid(262144)](buf29, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_13[grid(262144)](buf31, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 
256), torch.float32) buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31, buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_15[grid(131072)](buf35, primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_15[grid(131072)](buf37, primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_15[grid(131072)](buf39, primals_21, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf40 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32) buf41 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8) triton_poi_fused_max_pool2d_with_indices_16[grid(32768)](buf39, buf40, buf41, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf42 = extern_kernels.convolution(buf40, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf43 = buf42 del buf42 triton_poi_fused_convolution_relu_17[grid(32768)](buf43, primals_23, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_23 buf44 = extern_kernels.convolution(buf43, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf45 = buf44 del buf44 triton_poi_fused_convolution_relu_17[grid(32768)](buf45, primals_25, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_25 buf46 = extern_kernels.convolution(buf45, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf47 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. 
float32) triton_poi_fused_convolution_18[grid(2048, 16)](buf46, primals_27, buf47, 2048, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf46 del primals_27 return (buf47, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf40, buf41, buf43, buf45) class MINCNetNew(nn.Module): def __init__(self): super(MINCNetNew, self).__init__() self.ReLU = nn.ReLU(True) self.conv11 = nn.Conv2d(3, 64, 3, 1, 1) self.conv12 = nn.Conv2d(64, 64, 3, 1, 1) self.maxpool1 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True) self.conv21 = nn.Conv2d(64, 128, 3, 1, 1) self.conv22 = nn.Conv2d(128, 128, 3, 1, 1) self.maxpool2 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True) self.conv31 = nn.Conv2d(128, 256, 3, 1, 1) self.conv32 = nn.Conv2d(256, 256, 3, 1, 1) self.conv33 = nn.Conv2d(256, 256, 3, 1, 1) self.maxpool3 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True) self.conv41 = nn.Conv2d(256, 512, 3, 1, 1) self.conv42 = nn.Conv2d(512, 512, 3, 1, 1) self.conv43 = nn.Conv2d(512, 512, 3, 1, 1) self.maxpool4 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True) self.conv51 = nn.Conv2d(512, 512, 3, 1, 1) self.conv52 = nn.Conv2d(512, 512, 3, 1, 1) self.conv53 = nn.Conv2d(512, 512, 3, 1, 1) def forward(self, input_0): primals_1 = self.conv11.weight primals_2 = self.conv11.bias primals_4 = self.conv12.weight primals_5 = self.conv12.bias primals_6 = self.conv21.weight primals_7 = self.conv21.bias primals_8 = self.conv22.weight primals_9 = self.conv22.bias primals_10 = self.conv31.weight primals_11 = self.conv31.bias primals_12 = self.conv32.weight primals_13 = self.conv32.bias primals_14 = self.conv33.weight primals_15 = self.conv33.bias primals_16 = self.conv41.weight primals_17 = self.conv41.bias primals_18 = self.conv42.weight primals_19 = self.conv42.bias primals_20 = self.conv43.weight primals_21 = self.conv43.bias primals_22 = self.conv51.weight primals_23 = self.conv51.bias primals_24 = self.conv52.weight primals_25 = self.conv52.bias primals_26 = self.conv53.weight primals_27 = self.conv53.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
NicoleDeer/optimized-super-resolution
MINCNet
false
9,453
[ "Apache-2.0" ]
0
deba8a5cff06ab3bd8bf99e207b582f4ddc1ffd1
https://github.com/NicoleDeer/optimized-super-resolution/tree/deba8a5cff06ab3bd8bf99e207b582f4ddc1ffd1
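For orientation, the MINCNet record above is a plain stack of 3x3 convolutions with four stride-2 ceil-mode max-pools, so the 64x64 sample input is reduced 16x spatially and ends with 512 channels, matching the (4, 512, 4, 4) output buffer in the generated code. A hypothetical usage sketch (assuming the MINCNet class above is in scope):

import torch

net = MINCNet()
out = net(torch.rand(4, 3, 64, 64))
# Four stride-2 pools: 64 -> 32 -> 16 -> 8 -> 4 spatial resolution.
print(out.shape)  # torch.Size([4, 512, 4, 4])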
UnusedIndices
import torch
from torch import nn
import torch.onnx


class UnusedIndices(nn.Module):

    def __init__(self):
        super().__init__()
        self.mp = nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2],
            ceil_mode=True)

    def forward(self, x):
        return self.mp(x) - 42


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x4 = xindex // 2 x3 = xindex tmp0 = 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = 1 + 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), tmp16 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 2 + 2 * x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (2 + 2 * x0 + 8 * x4), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 1 + 2 * x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), tmp30 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), tmp33 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (6 + 2 * x0 + 8 * x4), tmp36 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 2 + 2 * x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (8 + 2 * x0 + 8 * x4), tmp43 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (9 + 2 * x0 + 8 * x4), tmp46 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (10 + 2 * x0 + 8 * x4), tmp49 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = 42.0 tmp53 = tmp51 - tmp52 tl.store(in_out_ptr0 + x3, tmp53, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_sub_0[grid(64)](buf1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf1, class UnusedIndicesNew(nn.Module): def __init__(self): super().__init__() self.mp = nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2], ceil_mode =True) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Piteryo/onnx2pytorch
UnusedIndices
false
9,454
[ "Apache-2.0" ]
0
c25b3a5289ee7073d644d280a112c15382b7f690
https://github.com/Piteryo/onnx2pytorch/tree/c25b3a5289ee7073d644d280a112c15382b7f690
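A short worked example for the UnusedIndices record above (illustrative only): with a 3x3 kernel, stride 2, no padding and ceil_mode=True, a 4x4 spatial input yields ceil((4 - 3) / 2) + 1 = 2 positions per side, which is why the generated code allocates a (4, 4, 2, 2) buffer and the fused kernel covers 4 * 4 * 2 * 2 = 64 elements, with the "- 42" folded into the same kernel.

import torch
from torch import nn

mp = nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2], ceil_mode=True)
x = torch.rand(4, 4, 4, 4)
print(mp(x).shape)         # torch.Size([4, 4, 2, 2])
print((mp(x) - 42).shape)  # same shape; only the values are shifted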
Mean
import torch
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *


class Mean(nn.Module):

    def __init__(self, dim, keep_dim=False):
        super(Mean, self).__init__()
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        return input.mean(self.dim, self.keep_dim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torchvision.datasets import * import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MeanNew(nn.Module): def __init__(self, dim, keep_dim=False): super(MeanNew, self).__init__() self.dim = dim self.keep_dim = keep_dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JJavierga/PyTorch-Encoding
Mean
false
9,455
[ "MIT" ]
0
207254b2a60276a31ffa24b76ae84df27c6ebf94
https://github.com/JJavierga/PyTorch-Encoding/tree/207254b2a60276a31ffa24b76ae84df27c6ebf94
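Hypothetical usage of the Mean record above (assuming the Mean class is in scope): with dim=4 and keep_dim=False the reduction drops the last axis of the 5-D sample input, which is consistent with the (4, 4, 4, 4) output buffer and the division by 4.0 in the fused kernel.

import torch

m = Mean(dim=4)
x = torch.rand(4, 4, 4, 4, 4)
print(m(x).shape)  # torch.Size([4, 4, 4, 4]); each output element averages 4 inputs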
SageLayer
import torch
import torch.nn as nn
import torch.nn.functional as F


class SageLayer(nn.Module):
    """A single SageLayer."""

    def __init__(self, input_size, out_size, gcn=False):
        super(SageLayer, self).__init__()
        self.input_size = input_size
        self.out_size = out_size
        self.gcn = gcn
        self.weight = nn.Parameter(torch.FloatTensor(out_size, self.
            input_size if self.gcn else 2 * self.input_size))
        self.init_params()

    def init_params(self):
        for param in self.parameters():
            nn.init.xavier_uniform_(param)

    def forward(self, self_feats, aggregate_feats, neighs=None):
        """
        Parameters:
            self_feats: feature vectors of the source nodes
            aggregate_feats: aggregated features of the neighbouring nodes
        """
        if not self.gcn:
            combined = torch.cat([self_feats, aggregate_feats], dim=1)
        else:
            combined = aggregate_feats
        combined = F.relu(self.weight.mm(combined.t())).t()
        return combined


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'out_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return reinterpret_tensor(buf2, (4, 4), (1, 4), 0), buf3, buf0 class SageLayerNew(nn.Module): """ 一层SageLayer """ def __init__(self, input_size, out_size, gcn=False): super(SageLayerNew, self).__init__() self.input_size = input_size self.out_size = out_size self.gcn = gcn self.weight = nn.Parameter(torch.FloatTensor(out_size, self. input_size if self.gcn else 2 * self.input_size)) self.init_params() def init_params(self): for param in self.parameters(): nn.init.xavier_uniform_(param) def forward(self, input_0, input_1): primals_3 = self.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
OuYangg/GNNs
SageLayer
false
9,456
[ "Apache-2.0" ]
0
ef5b1944490507684d603de3ae0b2aa7b5168f47
https://github.com/OuYangg/GNNs/tree/ef5b1944490507684d603de3ae0b2aa7b5168f47
CELoss
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.nn.functional as F


class CELoss(nn.Module):

    def __init__(self, ratio=1, weight=None, size_average=None,
        ignore_index=-100, reduce=None, reduction='mean'):
        super(CELoss, self).__init__()
        self.ratio = ratio
        self.weight = weight
        self.ignore_index = ignore_index
        self.reduction = reduction

    def forward(self, input, target):
        """
        Calculate the cross-entropy loss

        :param input(torch.Tensor): The prediction with shape (N, C),
            C is the number of classes.
        :param target(torch.Tensor): The learning label(N, 1) of the prediction.
        :return: (torch.Tensor): The calculated loss
        """
        target = target.squeeze_()
        return self.ratio * F.cross_entropy(input, target, weight=self.
            weight, ignore_index=self.ignore_index, reduction=self.reduction)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = 1.0 tmp23 = tmp21 * tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class CELossNew(nn.Module): def __init__(self, ratio=1, weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean'): super(CELossNew, self).__init__() self.ratio = ratio self.weight = weight self.ignore_index = ignore_index self.reduction = reduction def forward(self, input_0, input_1): 
arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Karenou/mmfashion
CELoss
false
9,457
[ "Apache-2.0" ]
0
dfc334232d1700cde18d144f983dd5b0a7f9852a
https://github.com/Karenou/mmfashion/tree/dfc334232d1700cde18d144f983dd5b0a7f9852a
RobertaSequenceClassificationHead
import torch
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler


class RobertaSequenceClassificationHead(nn.Module):
    """Head for sequence-level classification tasks. Ignores the <s> vector."""

    def __init__(self, input_dim, inner_dim, kernel_size, num_classes,
        pooler_dropout):
        super().__init__()
        self.conv_layer = nn.Conv1d(in_channels=input_dim, out_channels=
            inner_dim, kernel_size=kernel_size)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, features, **kwargs):
        x = torch.transpose(features, 1, 2)
        x = self.conv_layer(x)
        x = torch.max(x, dim=2).values
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'inner_dim': 4, 'kernel_size': 4,
        'num_classes': 4, 'pooler_dropout': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_max_1(in_out_ptr0, in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
    tl.store(out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
        del buf0
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_convolution_max_1[grid(16)](buf2, primals_3, buf3,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del buf3
        del primals_5
    return (buf4, primals_2,
        reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0),
        buf2, primals_4)


class RobertaSequenceClassificationHeadNew(nn.Module):
    """Head for sequence-level classification tasks.

    Ignores the <s> vector."""

    def __init__(self, input_dim, inner_dim, kernel_size, num_classes,
                 pooler_dropout):
        super().__init__()
        self.conv_layer = nn.Conv1d(in_channels=input_dim,
                                    out_channels=inner_dim,
                                    kernel_size=kernel_size)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, input_0):
        primals_1 = self.conv_layer.weight
        primals_3 = self.conv_layer.bias
        primals_4 = self.out_proj.weight
        primals_5 = self.out_proj.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
Sanjaje/stp_llmushu
RobertaSequenceClassificationHead
false
9,458
[ "MIT" ]
0
f6652c9c0506780374b4634933b1b725e989de24
https://github.com/Sanjaje/stp_llmushu/tree/f6652c9c0506780374b4634933b1b725e989de24
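Each row above pairs an eager PyTorch module with the Triton code Inductor generated for it, and the get_inputs/get_init_inputs helpers make every row self-checking. A minimal verification sketch under assumed tolerances (the helper name check_pair and the atol/rtol values are ours, not part of the dataset; .eval() matters for rows like this one that contain dropout):

import torch

def check_pair(eager_cls, compiled_cls, get_init_inputs, get_inputs):
    # Build both modules from the row's init arguments and share weights.
    init_args, init_kwargs = get_init_inputs()
    eager = eager_cls(*init_args, **init_kwargs).cuda().eval()
    compiled = compiled_cls(*init_args, **init_kwargs).cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    inputs = [t.cuda() for t in get_inputs()]
    with torch.no_grad():
        torch.testing.assert_close(compiled(*inputs), eager(*inputs),
                                   atol=1e-5, rtol=1e-5)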
FeatureCorrelation
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data


class FeatureCorrelation(nn.Module):

    def __init__(self):
        super(FeatureCorrelation, self).__init__()

    def forward(self, feat_a, feat_b):
        bs, c, h, w = feat_a.size()
        feat_a = feat_a.transpose(2, 3).contiguous().view(bs, c, h * w)
        feat_b = feat_b.view(bs, c, h * w).transpose(1, 2)
        feat_mul = torch.bmm(feat_b, feat_a)
        correlate_tensor = feat_mul.view(bs, h, w, h * w).transpose(2, 3
            ).transpose(1, 2)
        return correlate_tensor


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
            YBLOCK=32, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(
            reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0),
            reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), out=buf1)
        del arg1_1
        del buf0
    return reinterpret_tensor(buf1, (4, 16, 4, 4), (256, 1, 64, 16), 0),


class FeatureCorrelationNew(nn.Module):

    def __init__(self):
        super(FeatureCorrelationNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Karenou/mmfashion
FeatureCorrelation
false
9,459
[ "Apache-2.0" ]
0
dfc334232d1700cde18d144f983dd5b0a7f9852a
https://github.com/Karenou/mmfashion/tree/dfc334232d1700cde18d144f983dd5b0a7f9852a
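FeatureCorrelation builds the (h*w) x (h*w) matrix of dot products between every spatial position of feat_b and every position of feat_a; the compiled version keeps the bmm in cuBLAS and only adds a layout-clone kernel. An equivalent einsum formulation, included purely as a readability sketch (not part of the dataset):

import torch

def feature_correlation_reference(feat_a, feat_b):
    bs, c, h, w = feat_a.size()
    # transpose(2, 3) means feat_a positions are flattened w-major
    a = feat_a.transpose(2, 3).contiguous().view(bs, c, h * w)
    corr = torch.einsum('bcn,bcm->bnm', feat_b.view(bs, c, h * w), a)
    return corr.view(bs, h, w, h * w).permute(0, 3, 1, 2)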
FeatureNorm
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data


class FeatureNorm(nn.Module):

    def __init__(self, eps=1e-06):
        super(FeatureNorm, self).__init__()
        self.eps = eps

    def forward(self, feature):
        norm_feat = torch.sum(torch.pow(feature, 2), 1) + self.eps
        norm_feat = torch.pow(norm_feat, 0.5).unsqueeze(1)
        norm_feat = norm_feat.expand_as(feature)
        norm_feat = torch.div(feature, norm_feat)
        return norm_feat


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 1e-06
    tmp13 = tmp11 + tmp12
    tmp14 = libdevice.sqrt(tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class FeatureNormNew(nn.Module):

    def __init__(self, eps=1e-06):
        super(FeatureNormNew, self).__init__()
        self.eps = eps

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Karenou/mmfashion
FeatureNorm
false
9,460
[ "Apache-2.0" ]
0
dfc334232d1700cde18d144f983dd5b0a7f9852a
https://github.com/Karenou/mmfashion/tree/dfc334232d1700cde18d144f983dd5b0a7f9852a
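FeatureNorm is almost, but not exactly, F.normalize: it adds eps inside the sum before the square root, whereas F.normalize clamps the finished norm from below (compare the Normalize row further down, whose kernel uses triton_helpers.maximum instead of an added constant). A quick eager sketch of how close the two are:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
a = x / torch.sqrt((x * x).sum(1, keepdim=True) + 1e-06)  # FeatureNorm
b = F.normalize(x, p=2, dim=1, eps=1e-06)                 # clamped norm
print((a - b).abs().max())  # tiny but nonzero for well-scaled inputs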
GCN
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class GCNLayer(nn.Module):

    def __init__(self, input_features, output_features, bias=False):
        super(GCNLayer, self).__init__()
        self.input_features = input_features
        self.output_features = output_features
        self.weights = nn.Parameter(torch.FloatTensor(input_features,
            output_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(output_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        std = 1.0 / math.sqrt(self.weights.size(1))
        self.weights.data.uniform_(-std, std)
        if self.bias is not None:
            self.bias.data.uniform_(-std, std)

    def forward(self, adj, x):
        support = torch.mm(x, self.weights)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        return output


class GCN(nn.Module):

    def __init__(self, input_size, hidden_size, num_class, dropout,
                 bias=False):
        super(GCN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_class = num_class
        self.gcn1 = GCNLayer(input_size, hidden_size, bias=bias)
        self.gcn2 = GCNLayer(hidden_size, num_class, bias=bias)
        self.dropout = dropout

    def forward(self, adj, x):
        x = F.relu(self.gcn1(adj, x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gcn2(adj, x)
        return F.log_softmax(x, dim=1)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4, 'num_class': 4,
        'dropout': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(in_out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, buf0, out=buf1)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf3 = buf0
        del buf0
        extern_kernels.mm(buf2, primals_4, out=buf3)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, buf3, out=buf4)
        buf5 = buf3
        del buf3
        triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf6 = buf4
        del buf4
        triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf5
    return (buf6, buf2, buf6,
        reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_2, (4, 4), (1, 4), 0))


class GCNLayer(nn.Module):

    def __init__(self, input_features, output_features, bias=False):
        super(GCNLayer, self).__init__()
        self.input_features = input_features
        self.output_features = output_features
        self.weights = nn.Parameter(torch.FloatTensor(input_features,
            output_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(output_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        std = 1.0 / math.sqrt(self.weights.size(1))
        self.weights.data.uniform_(-std, std)
        if self.bias is not None:
            self.bias.data.uniform_(-std, std)

    def forward(self, adj, x):
        support = torch.mm(x, self.weights)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        return output


class GCNNew(nn.Module):

    def __init__(self, input_size, hidden_size, num_class, dropout,
                 bias=False):
        super(GCNNew, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_class = num_class
        self.gcn1 = GCNLayer(input_size, hidden_size, bias=bias)
        self.gcn2 = GCNLayer(hidden_size, num_class, bias=bias)
        self.dropout = dropout

    def forward(self, input_0, input_1):
        primals_1 = self.gcn1.weights
        primals_2 = self.gcn2.weights
        primals_3 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
OuYangg/GNNs
GCN
false
9,461
[ "Apache-2.0" ]
0
ef5b1944490507684d603de3ae0b2aa7b5168f47
https://github.com/OuYangg/GNNs/tree/ef5b1944490507684d603de3ae0b2aa7b5168f47
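The two _log_softmax kernels in the GCN row implement the standard numerically stable two-pass split: kernel 1 subtracts the per-row maximum, kernel 2 subtracts the log of the summed exponentials of the shifted values. The identity they rely on, sketched in eager PyTorch:

import torch

x = torch.randn(4, 4)
shifted = x - x.max(dim=1, keepdim=True).values               # kernel 1
out = shifted - shifted.exp().sum(dim=1, keepdim=True).log()  # kernel 2
print(torch.allclose(out, torch.log_softmax(x, dim=1)))       # True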
Normalize
import torch
from torchvision.datasets import *
import torch.nn.functional as F
import torch.nn as nn
from torchvision.transforms import *


class Normalize(nn.Module):
    """Performs :math:`L_p` normalization of inputs over specified dimension.

    Does:

    .. math::
        v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}

    for each subtensor v over dimension dim of input. Each subtensor is
    flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a
    matrix norm.

    With default arguments normalizes over the second dimension with
    Euclidean norm.

    Args:
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
    """

    def __init__(self, p=2, dim=1):
        super(Normalize, self).__init__()
        self.p = p
        self.dim = dim

    def forward(self, x):
        return F.normalize(x, self.p, self.dim, eps=1e-08)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-08
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class NormalizeNew(nn.Module):
    """Performs :math:`L_p` normalization of inputs over specified dimension.

    Does:

    .. math::
        v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}

    for each subtensor v over dimension dim of input. Each subtensor is
    flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a
    matrix norm.

    With default arguments normalizes over the second dimension with
    Euclidean norm.

    Args:
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
    """

    def __init__(self, p=2, dim=1):
        super(NormalizeNew, self).__init__()
        self.p = p
        self.dim = dim

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
JJavierga/PyTorch-Encoding
Normalize
false
9,462
[ "MIT" ]
0
207254b2a60276a31ffa24b76ae84df27c6ebf94
https://github.com/JJavierga/PyTorch-Encoding/tree/207254b2a60276a31ffa24b76ae84df27c6ebf94
L1NormLoss
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data


class L1NormLoss(nn.Module):

    def __init__(self, loss_weight=0.0005, average=True):
        super(L1NormLoss, self).__init__()
        self.loss_weight = loss_weight
        self.average = average

    def forward(self, x1, x2, x3, length):
        loss_norm = (x1 + x2 + x3) / 3
        if self.average:
            loss_norm = loss_norm / length
        return self.loss_weight * loss_norm


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp3 = tl.load(in_ptr2 + x0, xmask)
    tmp7 = tl.load(in_ptr3 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = 0.3333333333333333
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 / tmp7
    tmp9 = 0.0005
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1, arg3_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_0[grid(256)](arg0_1, arg1_1, arg2_1,
            arg3_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
        del arg3_1
    return buf0,


class L1NormLossNew(nn.Module):

    def __init__(self, loss_weight=0.0005, average=True):
        super(L1NormLossNew, self).__init__()
        self.loss_weight = loss_weight
        self.average = average

    def forward(self, input_0, input_1, input_2, input_3):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        arg3_1 = input_3
        output = call([arg0_1, arg1_1, arg2_1, arg3_1])
        return output[0]
Karenou/mmfashion
L1NormLoss
false
9,463
[ "Apache-2.0" ]
0
dfc334232d1700cde18d144f983dd5b0a7f9852a
https://github.com/Karenou/mmfashion/tree/dfc334232d1700cde18d144f983dd5b0a7f9852a
SmoothL1Loss
import torch
import torch.nn.functional as F
import torch.nn as nn


def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
        diff - 0.5 * beta)
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.sum() / pred.numel()
    elif reduction_enum == 2:
        return loss.sum()


def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06
    loss = smooth_l1_loss(pred, target, beta, reduction='none')
    return torch.sum(loss * weight)[None] / avg_factor


class SmoothL1Loss(nn.Module):

    def __init__(self, beta=1.0, loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight, *args, **kwargs):
        loss_bbox = self.loss_weight * weighted_smoothl1(pred, target,
            weight, *args, beta=self.beta, **kwargs)
        return loss_bbox


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_abs_div_lt_mul_sub_sum_where_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp12 = tl.load(in_ptr2 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 1.0
    tmp5 = tmp3 < tmp4
    tmp6 = 0.5
    tmp7 = tmp3 * tmp6
    tmp8 = tmp7 * tmp3
    tmp9 = tmp8 * tmp4
    tmp10 = tmp3 - tmp6
    tmp11 = tl.where(tmp5, tmp9, tmp10)
    tmp13 = tmp11 * tmp12
    tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
    tmp17 = 0.015624999755859379
    tmp18 = tmp16 * tmp17
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = reinterpret_tensor(buf0, (1,), (1,), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_div_lt_mul_sub_sum_where_0[grid(1)](buf1,
            arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf1,


def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
        diff - 0.5 * beta)
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.sum() / pred.numel()
    elif reduction_enum == 2:
        return loss.sum()


def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06
    loss = smooth_l1_loss(pred, target, beta, reduction='none')
    return torch.sum(loss * weight)[None] / avg_factor


class SmoothL1LossNew(nn.Module):

    def __init__(self, beta=1.0, loss_weight=1.0):
        super(SmoothL1LossNew, self).__init__()
        self.beta = beta
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
Sign-up-soon-after-papapa/DEA-Net
SmoothL1Loss
false
9,464
[ "Apache-2.0" ]
0
ed25f30ddedcb77eb0991aeb9e498ef2efd8c635
https://github.com/Sign-up-soon-after-papapa/DEA-Net/tree/ed25f30ddedcb77eb0991aeb9e498ef2efd8c635
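Note the constant 0.015624999755859379 baked into the SmoothL1Loss kernel: avg_factor is computed eagerly via .item() before compilation, so for the traced 4x4x4x4 inputs (all 256 random weights positive) it froze to 256 / 4 + 1e-06 = 64.000001, and its reciprocal became a literal. The compiled graph is therefore specialized to that trace-time value, as a one-liner confirms:

# reciprocal of the trace-time avg_factor
print(1.0 / (256 / 4 + 1e-06))  # 0.015624999755859379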
UpsampleConv2d
from torch.nn import Module
import math
import torch
from torchvision.datasets import *
import torch.nn.functional as F
from torch.nn import Parameter
from torch.nn.modules.utils import _pair
from torchvision.transforms import *


class UpsampleConv2d(Module):
    """
    To avoid the checkerboard artifacts of standard Fractionally-strided
    Convolution, we adapt an integer stride convolution but producing a
    :math:`2\\times 2` outputs for each convolutional window.

    .. image:: _static/img/upconv.png
        :width: 50%
        :align: center

    Reference:
        Hang Zhang and Kristin Dana. "Multi-style Generative Network for
        Real-time Transfer." *arXiv preprint arXiv:1703.06953 (2017)*

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides
            of the input. Default: 0
        output_padding (int or tuple, optional): Zero-padding added to one
            side of the output. Default: 0
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If True, adds a learnable bias to the output.
            Default: True
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        scale_factor (int): scaling factor for upsampling convolution.
            Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
          :math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]`
          :math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]`

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (in_channels, scale * scale * out_channels, kernel_size[0],
            kernel_size[1])
        bias (Tensor): the learnable bias of the module of shape
            (scale * scale * out_channels)

    Examples:
        >>> # With square kernels and equal stride
        >>> m = nn.UpsampleCov2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.UpsampleCov2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
        >>> output = m(input)
        >>> # exact output size can be also specified as an argument
        >>> input = autograd.Variable(torch.randn(1, 16, 12, 12))
        >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.UpsampleCov2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, scale_factor=1, bias=True):
        super(UpsampleConv2d, self).__init__()
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.scale_factor = scale_factor
        self.weight = Parameter(torch.Tensor(out_channels * scale_factor *
            scale_factor, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels * scale_factor *
                scale_factor))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1.0 / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input):
        out = F.conv2d(input, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
        return F.pixel_shuffle(out, self.scale_factor)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
from torchvision.datasets import *
from torch.nn import Parameter
from torch.nn.modules.utils import _pair
from torchvision.transforms import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return (reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0),
        primals_1, primals_3)


class UpsampleConv2dNew(Module):
    """
    To avoid the checkerboard artifacts of standard Fractionally-strided
    Convolution, we adapt an integer stride convolution but producing a
    :math:`2\\times 2` outputs for each convolutional window.

    .. image:: _static/img/upconv.png
        :width: 50%
        :align: center

    Reference:
        Hang Zhang and Kristin Dana. "Multi-style Generative Network for
        Real-time Transfer." *arXiv preprint arXiv:1703.06953 (2017)*

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides
            of the input. Default: 0
        output_padding (int or tuple, optional): Zero-padding added to one
            side of the output. Default: 0
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If True, adds a learnable bias to the output.
            Default: True
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        scale_factor (int): scaling factor for upsampling convolution.
            Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
          :math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]`
          :math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]`

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            (in_channels, scale * scale * out_channels, kernel_size[0],
            kernel_size[1])
        bias (Tensor): the learnable bias of the module of shape
            (scale * scale * out_channels)

    Examples:
        >>> # With square kernels and equal stride
        >>> m = nn.UpsampleCov2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.UpsampleCov2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
        >>> output = m(input)
        >>> # exact output size can be also specified as an argument
        >>> input = autograd.Variable(torch.randn(1, 16, 12, 12))
        >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.UpsampleCov2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, scale_factor=1, bias=True):
        super(UpsampleConv2dNew, self).__init__()
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.scale_factor = scale_factor
        self.weight = Parameter(torch.Tensor(out_channels * scale_factor *
            scale_factor, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels * scale_factor *
                scale_factor))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1.0 / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
JJavierga/PyTorch-Encoding
UpsampleConv2d
false
9,465
[ "MIT" ]
0
207254b2a60276a31ffa24b76ae84df27c6ebf94
https://github.com/JJavierga/PyTorch-Encoding/tree/207254b2a60276a31ffa24b76ae84df27c6ebf94
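The UpsampleConv2d row compiles down to a plain convolution plus a bias add because get_init_inputs leaves scale_factor at its default of 1, and pixel_shuffle with an upscale factor of 1 is the identity, so Inductor drops it from the graph. A one-line eager check of that claim:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 1, 1)
print(torch.equal(F.pixel_shuffle(x, 1), x))  # True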
MarginRankingLoss
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.nn.functional as F


class MarginRankingLoss(nn.Module):

    def __init__(self, margin=0.2, loss_weight=5e-05, size_average=None,
                 reduce=None, reduction='mean'):
        super(MarginRankingLoss, self).__init__()
        self.margin = margin
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, input1, input2, target):
        return self.loss_weight * F.margin_ranking_loss(input1, input2,
            target, margin=self.margin, reduction=self.reduction)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_clamp_min_mean_mul_neg_sub_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp3 = tl.load(in_ptr2 + r0, None)
    tmp1 = -tmp0
    tmp4 = tmp2 - tmp3
    tmp5 = tmp1 * tmp4
    tmp6 = 0.2
    tmp7 = tmp5 + tmp6
    tmp8 = 0.0
    tmp9 = triton_helpers.maximum(tmp7, tmp8)
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = 256.0
    tmp14 = tmp12 / tmp13
    tmp15 = 5e-05
    tmp16 = tmp14 * tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_clamp_min_mean_mul_neg_sub_0[grid(1)](buf1,
            arg0_1, arg2_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf1,


class MarginRankingLossNew(nn.Module):

    def __init__(self, margin=0.2, loss_weight=5e-05, size_average=None,
                 reduce=None, reduction='mean'):
        super(MarginRankingLossNew, self).__init__()
        self.margin = margin
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
Karenou/mmfashion
MarginRankingLoss
false
9,466
[ "Apache-2.0" ]
0
dfc334232d1700cde18d144f983dd5b0a7f9852a
https://github.com/Karenou/mmfashion/tree/dfc334232d1700cde18d144f983dd5b0a7f9852a
GlobalAvgPool1d
import torch
import torch.nn as nn


class GlobalAvgPool1d(nn.Module):

    def __init__(self):
        """Global average pooling over the input's spatial dimensions"""
        super(GlobalAvgPool1d, self).__init__()

    def forward(self, inputs):
        return nn.functional.adaptive_avg_pool1d(inputs, 1).view(
            inputs.size(0), -1)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 1), (1, 1), 0),


class GlobalAvgPool1dNew(nn.Module):

    def __init__(self):
        """Global average pooling over the input's spatial dimensions"""
        super(GlobalAvgPool1dNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Neronjust2017/challenge2020_test4
GlobalAvgPool1d
false
9,467
[ "BSD-2-Clause" ]
0
6494107a459b563aa51f8ea75c580c17557b13af
https://github.com/Neronjust2017/challenge2020_test4/tree/6494107a459b563aa51f8ea75c580c17557b13af
SelectiveMarginLoss
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data


class SelectiveMarginLoss(nn.Module):

    def __init__(self, loss_weight=5e-05, margin=0.2):
        super(SelectiveMarginLoss, self).__init__()
        self.margin = margin
        self.loss_weight = loss_weight

    def forward(self, pos_samples, neg_samples, has_sample):
        margin_diff = torch.clamp(pos_samples - neg_samples + self.margin,
            min=0, max=1000000.0)
        num_sample = max(torch.sum(has_sample), 1)
        return self.loss_weight * (torch.sum(margin_diff * has_sample) /
            num_sample)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_clamp_div_mul_sub_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp9 = tl.load(in_ptr2 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = 0.2
    tmp4 = tmp2 + tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = 1000000.0
    tmp8 = triton_helpers.minimum(tmp6, tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
    tmp17 = 1.0
    tmp18 = triton_helpers.maximum(tmp16, tmp17)
    tmp19 = tmp13 / tmp18
    tmp20 = 5e-05
    tmp21 = tmp19 * tmp20
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_clamp_div_mul_sub_sum_0[grid(1)](buf2, arg0_1,
            arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf2,


class SelectiveMarginLossNew(nn.Module):

    def __init__(self, loss_weight=5e-05, margin=0.2):
        super(SelectiveMarginLossNew, self).__init__()
        self.margin = margin
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
Karenou/mmfashion
SelectiveMarginLoss
false
9,468
[ "Apache-2.0" ]
0
dfc334232d1700cde18d144f983dd5b0a7f9852a
https://github.com/Karenou/mmfashion/tree/dfc334232d1700cde18d144f983dd5b0a7f9852a
TCB
import torch
import torch.nn as nn
from itertools import product as product
import torch.onnx.symbolic_helper


class TCB(nn.Module):
    """
    Transfer Connection Block Architecture
    This block
    """

    def __init__(self, lateral_channels, channles, internal_channels=256,
                 is_batchnorm=False):
        """
        :param lateral_channels: number of forward feature channles
        :param channles: number of pyramid feature channles
        :param internal_channels: number of internal channels
        """
        super(TCB, self).__init__()
        self.is_batchnorm = is_batchnorm
        use_bias = not self.is_batchnorm
        self.conv1 = nn.Conv2d(lateral_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.conv2 = nn.Conv2d(internal_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.deconv = nn.ConvTranspose2d(channles, internal_channels,
            kernel_size=3, stride=2, padding=1, output_padding=1,
            bias=use_bias)
        self.conv3 = nn.Conv2d(internal_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.relu = nn.ReLU(inplace=True)
        if self.is_batchnorm:
            self.bn1 = nn.BatchNorm2d(internal_channels)
            self.bn2 = nn.BatchNorm2d(internal_channels)
            self.deconv_bn = nn.BatchNorm2d(internal_channels)
            self.bn3 = nn.BatchNorm2d(internal_channels)
        self.out_channels = internal_channels

    def forward(self, lateral, x):
        if self.is_batchnorm:
            lateral_out = self.relu(self.bn1(self.conv1(lateral)))
            out = self.relu(self.bn2(self.conv2(lateral_out)) +
                self.deconv_bn(self.deconv(x)))
            out = self.relu(self.bn3(self.conv3(out)))
        else:
            lateral_out = self.relu(self.conv1(lateral))
            out = self.relu(self.conv2(lateral_out) + self.deconv(x))
            out = self.relu(self.conv3(out))
        return out


def get_inputs():
    return [torch.rand([4, 4, 16, 16]), torch.rand([4, 4, 8, 8])]


def get_init_inputs():
    return [[], {'lateral_channels': 4, 'channles': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from itertools import product as product
import torch.onnx.symbolic_helper

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_add_convolution_relu_1(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, None)
    tmp4 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.full([1], 0, tl.int32)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tl.store(in_out_ptr0 + x3, tmp8, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr0 + x3, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 16, 16), (1024, 256, 16, 1))
    assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (4, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1))
    assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_10, (256,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
            262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf3 = extern_kernels.convolution(primals_8, primals_6, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=True,
            output_padding=(1, 1), groups=1, bias=None)
        assert_size_stride(buf3, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf4 = buf2
        del buf2
        triton_poi_fused_add_convolution_relu_1[grid(262144)](buf4,
            primals_5, buf3, primals_7, 262144, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf3
        del primals_5
        del primals_7
        buf5 = extern_kernels.convolution(buf4, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf6 = buf5
        del buf5
        buf7 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_2[grid(262144)](
            buf6, primals_10, buf7, 262144, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del primals_10
    return (buf6, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_9, buf1, buf4, buf7)


class TCBNew(nn.Module):
    """
    Transfer Connection Block Architecture
    This block
    """

    def __init__(self, lateral_channels, channles, internal_channels=256,
                 is_batchnorm=False):
        """
        :param lateral_channels: number of forward feature channles
        :param channles: number of pyramid feature channles
        :param internal_channels: number of internal channels
        """
        super(TCBNew, self).__init__()
        self.is_batchnorm = is_batchnorm
        use_bias = not self.is_batchnorm
        self.conv1 = nn.Conv2d(lateral_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.conv2 = nn.Conv2d(internal_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.deconv = nn.ConvTranspose2d(channles, internal_channels,
            kernel_size=3, stride=2, padding=1, output_padding=1,
            bias=use_bias)
        self.conv3 = nn.Conv2d(internal_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.relu = nn.ReLU(inplace=True)
        if self.is_batchnorm:
            self.bn1 = nn.BatchNorm2d(internal_channels)
            self.bn2 = nn.BatchNorm2d(internal_channels)
            self.deconv_bn = nn.BatchNorm2d(internal_channels)
            self.bn3 = nn.BatchNorm2d(internal_channels)
        self.out_channels = internal_channels

    def forward(self, input_0, input_1):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.deconv.weight
        primals_7 = self.deconv.bias
        primals_9 = self.conv3.weight
        primals_10 = self.conv3.bias
        primals_3 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10])
        return output[0]
SaralaSewwandi/refinedet-onnxvalidation
TCB
false
9,469
[ "MIT" ]
0
5b71c994fc6ca183dc6cb30b7e21d201c15da490
https://github.com/SaralaSewwandi/refinedet-onnxvalidation/tree/5b71c994fc6ca183dc6cb30b7e21d201c15da490
GramMatrix
import torch
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *


class GramMatrix(nn.Module):
    """Gram Matrix for a 4D convolutional featuremaps as a mini-batch.

    .. math::
        \\mathcal{G} = \\sum_{h=1}^{H_i}\\sum_{w=1}^{W_i}
        \\mathcal{F}_{h,w}\\mathcal{F}_{h,w}^T
    """

    def forward(self, y):
        b, ch, h, w = y.size()
        features = y.view(b, ch, w * h)
        features_t = features.transpose(1, 2)
        gram = features.bmm(features_t) / (ch * h * w)
        return gram


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 0.015625
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(
            reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0),
            reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0)
        del arg0_1
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
    return buf1,


class GramMatrixNew(nn.Module):
    """Gram Matrix for a 4D convolutional featuremaps as a mini-batch.

    .. math::
        \\mathcal{G} = \\sum_{h=1}^{H_i}\\sum_{w=1}^{W_i}
        \\mathcal{F}_{h,w}\\mathcal{F}_{h,w}^T
    """

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
JJavierga/PyTorch-Encoding
GramMatrix
false
9,470
[ "MIT" ]
0
207254b2a60276a31ffa24b76ae84df27c6ebf94
https://github.com/JJavierga/PyTorch-Encoding/tree/207254b2a60276a31ffa24b76ae84df27c6ebf94
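The only Triton kernel GramMatrix needs is the scalar division; the Gram product itself stays in cuBLAS as a bmm, and the hardcoded 0.015625 is just 1 / (ch * h * w) = 1 / 64 for the traced 4x4x4x4 input. An eager sketch of the same computation:

import torch

y = torch.rand(4, 4, 4, 4)
b, ch, h, w = y.size()
feat = y.view(b, ch, h * w)
gram = feat.bmm(feat.transpose(1, 2)) / (ch * h * w)
print(gram.shape, 1.0 / (ch * h * w))  # torch.Size([4, 4, 4]) 0.015625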
MSELoss
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.nn.functional as F


class MSELoss(nn.Module):

    def __init__(self, ratio=1, size_average=None, reduce=None,
                 reduction='mean'):
        super(MSELoss, self).__init__()
        self.ratio = ratio
        self.size_average = size_average
        self.reduce = reduce
        self.reduction = reduction

    def forward(self, input, target, avg_factor=None):
        return self.ratio * F.mse_loss(input, target,
            reduction=self.reduction)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1.0
    tmp10 = tmp8 * tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1,
            256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class MSELossNew(nn.Module):

    def __init__(self, ratio=1, size_average=None, reduce=None,
                 reduction='mean'):
        super(MSELossNew, self).__init__()
        self.ratio = ratio
        self.size_average = size_average
        self.reduce = reduce
        self.reduction = reduction

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Karenou/mmfashion
MSELoss
false
9,471
[ "Apache-2.0" ]
0
dfc334232d1700cde18d144f983dd5b0a7f9852a
https://github.com/Karenou/mmfashion/tree/dfc334232d1700cde18d144f983dd5b0a7f9852a
SpatialAttention
import torch
import torch.nn as nn


class SpatialAttention(nn.Module):

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv1d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        out = torch.cat([avg_out, max_out], dim=1)
        out = self.conv1(out)
        return self.sigmoid(out) * x


def get_inputs():
    return [torch.rand([4, 2, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 % 2 x0 = xindex % 64 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 128 * x2), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (64 + x0 + 128 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = 2.0 tmp9 = tmp7 / tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp15 = tl.load(in_ptr0 + (x0 + 128 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr0 + (64 + x0 + 128 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp12, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp11, tmp19) tl.store(out_ptr0 + x3, tmp20, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex // 128 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr1 + x3, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 2, 64), (128, 64, 1)) assert_size_stride(primals_2, (1, 2, 7), (14, 7, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 64), (64, 64, 1)) buf2 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.float32) triton_poi_fused_mul_sigmoid_1[grid(512)](buf1, primals_1, buf2, 512, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_1, primals_2, buf0, buf1 class SpatialAttentionNew(nn.Module): def __init__(self, kernel_size=7): super(SpatialAttentionNew, self).__init__() assert kernel_size in (3, 7), 'kernel size must be 3 or 7' padding = 3 if kernel_size == 7 else 1 self.conv1 = nn.Conv1d(2, 1, kernel_size, padding=padding, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.conv1.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Neronjust2017/challenge2020_test4
SpatialAttention
false
9472
[ "BSD-2-Clause" ]
0
6494107a459b563aa51f8ea75c580c17557b13af
https://github.com/Neronjust2017/challenge2020_test4/tree/6494107a459b563aa51f8ea75c580c17557b13af
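A note on the SpatialAttention pair above: triton_poi_fused_cat_0 folds the channel-wise mean and max directly into the concatenation (its xnumel = 512 pins the (4, 2, 64) input shape), the 1-D convolution remains an extern kernel, and triton_poi_fused_mul_sigmoid_1 fuses the sigmoid gate with the elementwise product. A parity sketch under the same assumptions as the previous note:

import torch

# Both modules hold only the bias-free Conv1d, so copying the state dict
# aligns them; the result is sigmoid(conv1(cat([mean, max], 1))) * x.
eager = SpatialAttention().cuda()
compiled = SpatialAttentionNew().cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 2, 64, device='cuda')
torch.testing.assert_close(eager(x), compiled(x))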
MultiHeadedLinerAttention
import torch from torch import nn class MultiHeadedLinerAttention(nn.Module): """Multi-Head Linear Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head, n_feat, dropout_rate): """Construct a MultiHeadedLinerAttention object.""" super(MultiHeadedLinerAttention, self).__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.attn = None self.dropout = nn.Dropout(p=dropout_rate) def forward(self, query, key, value, mask=None): """Compute multi-head linear attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) dim = v.size(-1) q, k = map(lambda x: x * dim ** -0.25, (q, k)) q = q.softmax(dim=-1) k = k.softmax(dim=-2) context_einsum_eq = 'bhnd,bhne->bhde' context = torch.einsum(context_einsum_eq, k, v) attn_einsum_eq = 'bhnd,bhde->bhne' x = torch.einsum(attn_einsum_eq, q, context) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_mul_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float('-inf')) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r2 + 16 * x3), tmp15, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_clone_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp4 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp7, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp9, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf6 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_mul_0[grid(16)](buf1, primals_5, buf6, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_5 buf7 = reinterpret_tensor(buf1, (4, 4, 16, 1, 1), (64, 16, 1, 1, 1), 0) del buf1 triton_poi_fused_clone_1[grid(16, 16)](buf2, primals_8, buf7, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf8 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 16), (16, 16, 1 ), 0), reinterpret_tensor(buf7, (16, 16, 1), (16, 1, 0), 0), out=buf8) buf3 = reinterpret_tensor(buf2, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf2 buf9 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32) triton_poi_fused__softmax_clone_mul_2[grid(64, 4)](buf0, primals_3, buf8, buf3, buf9, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_3 buf10 = buf0 del buf0 extern_kernels.addmm(primals_11, reinterpret_tensor(buf9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_11 return reinterpret_tensor(buf10, (4, 16, 4), (64, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf3, buf6, buf8, reinterpret_tensor(buf9, (64, 
4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf7, (16, 1, 16), (16, 1, 1), 0) class MultiHeadedLinerAttentionNew(nn.Module): """Multi-Head Linear Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head, n_feat, dropout_rate): """Construct a MultiHeadedLinerAttentionNew object.""" super(MultiHeadedLinerAttentionNew, self).__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.attn = None self.dropout = nn.Dropout(p=dropout_rate) def forward(self, input_0, input_1, input_2): primals_2 = self.linear_q.weight primals_3 = self.linear_q.bias primals_4 = self.linear_k.weight primals_5 = self.linear_k.bias primals_7 = self.linear_v.weight primals_8 = self.linear_v.bias primals_10 = self.linear_out.weight primals_11 = self.linear_out.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Shengqiang-Li/LAC
MultiHeadedLinerAttention
false
9473
[ "Apache-2.0" ]
0
6b549cd89e03be2fafa4ce4378e70538744b9aa3
https://github.com/Shengqiang-Li/LAC/tree/6b549cd89e03be2fafa4ce4378e70538744b9aa3
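A note on the MultiHeadedLinerAttention pair above: the two einsums implement linear attention. Keys are contracted with values first ('bhnd,bhne->bhde'), so the n-by-n attention map is never materialised and the cost drops from O(n^2 d) to O(n d^2). A self-contained sketch of that identity (shapes chosen arbitrarily, runs on CPU):

import torch

# Build a d-by-e "context" by summing over positions first, then read it out
# per query; this equals forming the full attention map explicitly.
b, h, n, d = 2, 4, 16, 8
q = torch.rand(b, h, n, d).softmax(dim=-1)
k = torch.rand(b, h, n, d).softmax(dim=-2)
v = torch.rand(b, h, n, d)
context = torch.einsum('bhnd,bhne->bhde', k, v)   # sum over positions n
x = torch.einsum('bhnd,bhde->bhne', q, context)   # per-query readout
x_ref = torch.einsum('bhnd,bhmd,bhme->bhne', q, k, v)
torch.testing.assert_close(x, x_ref)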
SpatialTokenGen
import torch import torch.nn as nn class SpatialTokenGen(nn.Module): def __init__(self, d_ffn, seq_len): super(SpatialTokenGen, self).__init__() self.layer_norm = nn.LayerNorm(d_ffn) self.squeeze_layer_i = nn.Linear(d_ffn, 1) self.squeeze_layer_ii = nn.Conv1d(seq_len, 1, 1) def forward(self, x): x = self.layer_norm(x) x = self.squeeze_layer_i(x) x = self.squeeze_layer_ii(x) tok = torch.mean(x) tok = torch.sigmoid(tok) return tok def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_ffn': 4, 'seq_len': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mean_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_out_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr0 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = 1.0 tmp6 = tmp4 / tmp5 tmp7 = tl.sigmoid(tmp6) tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp7, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_1 del primals_2 buf4 = reinterpret_tensor(buf1, (4, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 4, 1 ), (4, 1, 1), 0), primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf5, (1, 1, 1), (1, 1, 1)) buf6 = reinterpret_tensor(buf5, (), (), 0) del buf5 triton_poi_fused_mean_sigmoid_2[grid(1)](buf6, primals_7, 1, XBLOCK =1, num_warps=1, num_stages=1) del primals_7 return buf6, primals_3, primals_6, buf2, reinterpret_tensor(buf4, (1, 4, 1), (4, 1, 1), 0), buf6, primals_4 class SpatialTokenGenNew(nn.Module): def __init__(self, d_ffn, seq_len): super(SpatialTokenGenNew, self).__init__() self.layer_norm = nn.LayerNorm(d_ffn) self.squeeze_layer_i = nn.Linear(d_ffn, 1) self.squeeze_layer_ii = nn.Conv1d(seq_len, 1, 1) def forward(self, input_0): primals_1 = self.layer_norm.weight primals_2 = self.layer_norm.bias primals_4 = self.squeeze_layer_i.weight primals_5 = self.squeeze_layer_i.bias primals_6 = self.squeeze_layer_ii.weight primals_7 = self.squeeze_layer_ii.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
SeungoneKim/sgMLP_Implementation
SpatialTokenGen
false
9474
[ "Apache-2.0" ]
0
5c5e623577a7ada3b200d99e77dc707a10cb1195
https://github.com/SeungoneKim/sgMLP_Implementation/tree/5c5e623577a7ada3b200d99e77dc707a10cb1195
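A note on the SpatialTokenGen pair above: the module squeezes the feature axis with Linear(d_ffn, 1) and the sequence axis with the kernel-size-1 Conv1d, so only one element remains; the final kernel's division by 1.0 is the mean over that single element, and the sigmoid maps it into (0, 1). Parity sketch, same assumptions as the earlier notes:

import torch

# LayerNorm(4) -> Linear(4, 1) -> Conv1d(4, 1, 1) -> mean of one element ->
# sigmoid, collapsing the (4, 4) input to a single scalar gate.
eager = SpatialTokenGen(d_ffn=4, seq_len=4).cuda()
compiled = SpatialTokenGenNew(d_ffn=4, seq_len=4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x))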
ModelRegressionAdt2Gex
import torch import torch.utils.data import torch.nn.functional as F import torch.nn as nn class Swish(torch.autograd.Function): @staticmethod def forward(ctx, i): result = i * torch.sigmoid(i) ctx.save_for_backward(i) return result @staticmethod def backward(ctx, grad_output): i = ctx.saved_tensors[0] sigmoid_i = torch.sigmoid(i) return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) class Swish_module(nn.Module): def forward(self, x): return Swish.apply(x) class ModelRegressionAdt2Gex(nn.Module): def __init__(self, dim_mod1, dim_mod2): super(ModelRegressionAdt2Gex, self).__init__() self.input_ = nn.Linear(dim_mod1, 512) self.dropout1 = nn.Dropout(p=0.0) self.swish = Swish_module() self.fc = nn.Linear(512, 512) self.fc1 = nn.Linear(512, 512) self.fc2 = nn.Linear(512, 512) self.output = nn.Linear(512, dim_mod2) def forward(self, x): x = F.gelu(self.input_(x)) x = F.gelu(self.fc(x)) x = F.gelu(self.fc1(x)) x = F.gelu(self.fc2(x)) x = F.gelu(self.output(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_mod1': 4, 'dim_mod2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_gelu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 512), (512, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 512), (512, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512, 512), (512, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (4, 512), (512, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(32768)](buf0, buf1, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 512), (1, 512 ), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) triton_poi_fused_gelu_0[grid(32768)](buf2, buf3, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 512), (1, 512 ), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) triton_poi_fused_gelu_0[grid(32768)](buf4, buf5, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_9, 
reinterpret_tensor(buf5, (64, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 512), (1, 512 ), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) triton_poi_fused_gelu_0[grid(32768)](buf6, buf7, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 512), (512, 1), 0), reinterpret_tensor(primals_10, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf8) del primals_11 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_gelu_1[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 512), (512, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 512), (512, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 512), (512, 1), 0 ), buf6, reinterpret_tensor(buf7, (64, 512), (512, 1), 0 ), buf8, primals_10, primals_8, primals_6, primals_4 class Swish(torch.autograd.Function): @staticmethod def forward(ctx, i): result = i * torch.sigmoid(i) ctx.save_for_backward(i) return result @staticmethod def backward(ctx, grad_output): i = ctx.saved_tensors[0] sigmoid_i = torch.sigmoid(i) return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) class Swish_module(nn.Module): def forward(self, x): return Swish.apply(x) class ModelRegressionAdt2GexNew(nn.Module): def __init__(self, dim_mod1, dim_mod2): super(ModelRegressionAdt2GexNew, self).__init__() self.input_ = nn.Linear(dim_mod1, 512) self.dropout1 = nn.Dropout(p=0.0) self.swish = Swish_module() self.fc = nn.Linear(512, 512) self.fc1 = nn.Linear(512, 512) self.fc2 = nn.Linear(512, 512) self.output = nn.Linear(512, dim_mod2) def forward(self, input_0): primals_1 = self.input_.weight primals_2 = self.input_.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.output.weight primals_11 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Permoment-95/neurips2021_multimodal_topmethods
ModelRegressionAdt2Gex
false
9475
[ "MIT" ]
0
017bc23b366a80ba9b1c2a47ea6c44124f77a7ca
https://github.com/Permoment-95/neurips2021_multimodal_topmethods/tree/017bc23b366a80ba9b1c2a47ea6c44124f77a7ca
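A note on the ModelRegressionAdt2Gex pair above: the Swish autograd Function needs torch.sigmoid (a bare sigmoid would be undefined in this module), but it is dead code here, since forward only applies F.gelu; accordingly the compiled call() contains nothing but addmm and the fused erf-based GELU kernels, and dropout1 (p=0.0) vanishes entirely. The kernel constant 0.7071067811865476 is 1/sqrt(2), spelling out the exact GELU (a CPU-runnable check):

import torch
import torch.nn.functional as F

# The fused kernel computes gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).
x = torch.randn(64)
manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
torch.testing.assert_close(manual, F.gelu(x))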
CrossEntropyLoss
import torch import torch.nn.functional as F import torch.nn as nn def mask_cross_entropy(pred, target, label): num_rois = pred.size()[0] inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) pred_slice = pred[inds, label].squeeze(1) return F.binary_cross_entropy_with_logits(pred_slice, target, reduction ='mean')[None] def _expand_binary_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(labels >= 1).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds] - 1] = 1 bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size (0), label_channels) return bin_labels, bin_label_weights def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None): if pred.dim() != label.dim(): label, weight = _expand_binary_labels(label, weight, pred.size(-1)) if avg_factor is None: avg_factor = max(torch.sum(weight > 0).float().item(), 1.0) return F.binary_cross_entropy_with_logits(pred, label.float(), weight. float(), reduction='sum')[None] / avg_factor def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True): if avg_factor is None: avg_factor = max(torch.sum(weight > 0).float().item(), 1.0) raw = F.cross_entropy(pred, label, reduction='none') if reduce: return torch.sum(raw * weight)[None] / avg_factor else: return raw * weight / avg_factor class CrossEntropyLoss(nn.Module): def __init__(self, use_sigmoid=False, use_mask=False, loss_weight=1.0): super(CrossEntropyLoss, self).__init__() assert use_sigmoid is False or use_mask is False self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.loss_weight = loss_weight if self.use_sigmoid: self.cls_criterion = weighted_binary_cross_entropy elif self.use_mask: self.cls_criterion = mask_cross_entropy else: self.cls_criterion = weighted_cross_entropy def forward(self, cls_score, label, label_weight, *args, **kwargs): loss_cls = self.loss_weight * self.cls_criterion(cls_score, label, label_weight, *args, **kwargs) return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tl.store(out_ptr0 + x2, tmp27, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + r2, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 0.00390625 tmp7 = tmp5 * tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), 
(64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](buf0, arg0_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = reinterpret_tensor(buf2, (1,), (1,), 0) del buf2 triton_per_fused__log_softmax_div_mul_neg_sum_2[grid(1)](buf3, buf1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf1 return buf3, def mask_cross_entropy(pred, target, label): num_rois = pred.size()[0] inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) pred_slice = pred[inds, label].squeeze(1) return F.binary_cross_entropy_with_logits(pred_slice, target, reduction ='mean')[None] def _expand_binary_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(labels >= 1).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds] - 1] = 1 bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size (0), label_channels) return bin_labels, bin_label_weights def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None): if pred.dim() != label.dim(): label, weight = _expand_binary_labels(label, weight, pred.size(-1)) if avg_factor is None: avg_factor = max(torch.sum(weight > 0).float().item(), 1.0) return F.binary_cross_entropy_with_logits(pred, label.float(), weight. float(), reduction='sum')[None] / avg_factor def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True): if avg_factor is None: avg_factor = max(torch.sum(weight > 0).float().item(), 1.0) raw = F.cross_entropy(pred, label, reduction='none') if reduce: return torch.sum(raw * weight)[None] / avg_factor else: return raw * weight / avg_factor class CrossEntropyLossNew(nn.Module): def __init__(self, use_sigmoid=False, use_mask=False, loss_weight=1.0): super(CrossEntropyLossNew, self).__init__() assert use_sigmoid is False or use_mask is False self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.loss_weight = loss_weight if self.use_sigmoid: self.cls_criterion = weighted_binary_cross_entropy elif self.use_mask: self.cls_criterion = mask_cross_entropy else: self.cls_criterion = weighted_cross_entropy def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
Sign-up-soon-after-papapa/DEA-Net
CrossEntropyLoss
false
9476
[ "Apache-2.0" ]
0
ed25f30ddedcb77eb0991aeb9e498ef2efd8c635
https://github.com/Sign-up-soon-after-papapa/DEA-Net/tree/ed25f30ddedcb77eb0991aeb9e498ef2efd8c635
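A note on the CrossEntropyLoss pair above: with three same-shaped float tensors the default branch (weighted_cross_entropy) runs with soft class-probability targets, and the final kernel's scale 0.00390625 is 1/256, the avg_factor whenever every element of the (4, 4, 4, 4) weight tensor is positive, which torch.rand gives almost surely. A CPU-runnable sketch of the eager arithmetic being fused:

import torch
import torch.nn.functional as F

# Soft-target cross entropy: the per-position loss is
# -sum_c target_c * log_softmax(pred)_c, followed by a weighted global sum
# divided by avg_factor (the count of positive weights).
pred = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)    # soft targets over dim 1
weight = torch.rand(4, 4, 4, 4)
raw = -(target * F.log_softmax(pred, dim=1)).sum(dim=1)   # shape (4, 4, 4)
torch.testing.assert_close(raw, F.cross_entropy(pred, target, reduction='none'))
avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)   # 256.0 here
loss = torch.sum(raw * weight)[None] / avg_factor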
DownsampleA
import torch import torch.nn as nn class DownsampleA(nn.Module): def __init__(self, nIn, nOut, stride): super(DownsampleA, self).__init__() assert stride == 2 self.avg = nn.AvgPool2d(kernel_size=1, stride=stride) def forward(self, x): x = self.avg(x) return torch.cat((x, x.mul(0)), 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nIn': 4, 'nOut': 4, 'stride': 2}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4 % 8 x0 = xindex % 2 x1 = xindex // 2 % 2 x3 = xindex // 32 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tmp13 * tmp6 tmp15 = 0.0 tmp16 = tmp14 * tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp9, tmp18) tl.store(out_ptr0 + x4, tmp19, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class DownsampleANew(nn.Module): def __init__(self, nIn, nOut, stride): super(DownsampleANew, self).__init__() assert stride == 2 self.avg = nn.AvgPool2d(kernel_size=1, stride=stride) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
QIU023/continual-learning-reproduce
DownsampleA
false
9477
[ "MIT" ]
0
772faa6904b3488fa5deee14f03d86f3b3664a87
https://github.com/QIU023/continual-learning-reproduce/tree/772faa6904b3488fa5deee14f03d86f3b3664a87
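A note on the DownsampleA pair above: AvgPool2d with kernel_size=1 and stride=2 is plain strided subsampling, and the single fused kernel emits the zero-padded channel doubling directly; its first branch copies x[:, :, ::2, ::2] and its second multiplies the same values by 0.0. Parity sketch (CUDA and the record's class assumed in scope):

import torch

# cat([x[:, :, ::2, ::2], zeros], dim=1) in one pass, giving shape (4, 8, 2, 2).
x = torch.rand(4, 4, 4, 4, device='cuda')
sub = x[:, :, ::2, ::2]
expected = torch.cat((sub, torch.zeros_like(sub)), 1)
torch.testing.assert_close(expected, DownsampleANew(4, 4, 2)(x))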
CNN
import torch import torch.nn as nn import torch.nn.functional as F class CNN(nn.Module): def __init__(self, in_channels, output): super(CNN, self).__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=20, kernel_size=3, stride=1, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=0) self.conv2 = nn.Conv2d(in_channels=20, out_channels=15, kernel_size =3, stride=1, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) self.conv2linear_input_shape = 15 * 15 * 15 self.fc1 = nn.Linear(self.conv2linear_input_shape, 256) self.fc2 = nn.Linear(256, 64) self.fc3 = nn.Linear(64, output) def forward(self, x): x = F.relu(self.conv1(x)) x = self.pool1(x) x = F.relu(self.conv2(x)) x = self.pool2(x) x = x.view(-1, self.conv2linear_input_shape) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) def get_inputs(): return [torch.rand([4, 4, 32, 32])] def get_init_inputs(): return [[], {'in_channels': 4, 'output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 20 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 72000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x4 = xindex // 900 x3 = xindex // 18000 x5 = xindex % 18000 tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1 + 1024 * x4), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 32 * x1 + 1024 * x4), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 32 * x1 + 1024 * x4), xmask) tmp5 = tl.load(in_ptr0 + (32 + x0 + 32 * x1 + 1024 * x4), xmask) tmp7 = tl.load(in_ptr0 + (33 + x0 + 32 * x1 + 1024 * x4), xmask) tmp9 = tl.load(in_ptr0 + (34 + x0 + 32 * x1 + 1024 * x4), xmask) tmp11 = tl.load(in_ptr0 + (64 + x0 + 32 * x1 + 1024 * x4), xmask) tmp13 = tl.load(in_ptr0 + (65 + x0 + 32 * x1 + 1024 * x4), xmask) tmp15 = tl.load(in_ptr0 + (66 + x0 + 32 * x1 + 1024 * x4), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + (x5 + 18016 * x3), tmp16, xmask) tl.store(out_ptr1 + (x5 + 18048 * x3), tmp41, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 54000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 900 % 15 x2 = xindex // 13500 x4 = xindex % 13500 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = 
tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + 13504 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 13500 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 15 x1 = xindex // 15 % 225 x2 = xindex // 3375 x3 = xindex % 3375 tmp0 = tl.load(in_ptr0 + (2 * x0 + 60 * x1 + 13504 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 60 * x1 + 13504 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (30 + 2 * x0 + 60 * x1 + 13504 * x2), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (31 + 2 * x0 + 60 * x1 + 13504 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x3 + 3456 * x2), tmp15, xmask) tl.store(out_ptr1 + (x3 + 3392 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (20, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 4, 32, 32), (4096, 1024, 32, 1)) assert_size_stride(primals_4, (15, 20, 3, 3), (180, 9, 3, 1)) assert_size_stride(primals_5, (15,), (1,)) assert_size_stride(primals_6, (256, 3375), (3375, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (64, 256), (256, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (4, 64), (64, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 32, 32), (20480, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(81920)](buf1, primals_2, 81920, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 20, 30, 30), (18016, 900, 30, 1), torch.float32) buf3 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(72000)](buf1, buf2, buf3, 72000, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 15, 30, 30), (13500, 900, 30, 1)) buf5 = empty_strided_cuda((4, 15, 30, 30), (13504, 900, 30, 1), torch.float32) triton_poi_fused_convolution_relu_2[grid(54000)](buf4, primals_5, buf5, 54000, XBLOCK=512, num_warps=4, num_stages=1) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 15, 15, 15), (3456, 225, 15, 1), torch.int8) buf7 = empty_strided_cuda((4, 15, 15, 15), (3392, 225, 15, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_3[grid(13500)](buf5, buf6, buf7, 13500, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 3375), (3392, 1), 0), reinterpret_tensor(primals_6, (3375, 256), (1, 3375), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(1024)](buf9, primals_7, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (256, 64), (1, 256), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(256)](buf11, primals_9, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 4), 
(4, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf12) del primals_11 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_6[grid(16)](buf12, buf13, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf14 = buf12 del buf12 triton_poi_fused__log_softmax_7[grid(16)](buf13, buf14, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf13 return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 3375), (3392, 1), 0), buf9, buf11, buf14, primals_10, primals_8, primals_6) class CNNNew(nn.Module): def __init__(self, in_channels, output): super(CNNNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=20, kernel_size=3, stride=1, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=0) self.conv2 = nn.Conv2d(in_channels=20, out_channels=15, kernel_size =3, stride=1, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) self.conv2linear_input_shape = 15 * 15 * 15 self.fc1 = nn.Linear(self.conv2linear_input_shape, 256) self.fc2 = nn.Linear(256, 64) self.fc3 = nn.Linear(64, output) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Sheriff-A/CNN
CNN
false
9478
[ "MIT" ]
0
59fc187e7cdf92379f52c4f942424d3a5042bf3e
https://github.com/Sheriff-A/CNN/tree/59fc187e7cdf92379f52c4f942424d3a5042bf3e
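A note on the CNN pair above, with the shape arithmetic behind conv2linear_input_shape = 3375 for a 32x32 input: conv1 (k3, s1, p1) keeps 32, pool1 (k3, s1, p0) gives 30, conv2 (k3, s1, p1) keeps 30, pool2 (k2, s2, p0) gives 15, and 15 channels * 15 * 15 = 3375, matching the (256, 3375) fc1 weight asserted in call(). A CPU-runnable sanity check on the eager module:

import torch

# (4, 4, 32, 32) maps to (4, 4) log-probabilities; each row of a log_softmax
# output exponentiates to a distribution summing to one.
net = CNN(in_channels=4, output=4)
out = net(torch.rand(4, 4, 32, 32))
assert out.shape == (4, 4)
torch.testing.assert_close(out.exp().sum(dim=1), torch.ones(4))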
ModelRegressionGex2Adt
import torch import torch.utils.data import torch.nn.functional as F import torch.nn as nn class ModelRegressionGex2Adt(nn.Module): def __init__(self, dim_mod1, dim_mod2): super(ModelRegressionGex2Adt, self).__init__() self.input_ = nn.Linear(dim_mod1, 512) self.dropout1 = nn.Dropout(p=0.20335661386636347) self.dropout2 = nn.Dropout(p=0.15395289261127876) self.dropout3 = nn.Dropout(p=0.16902655078832815) self.fc = nn.Linear(512, 512) self.fc1 = nn.Linear(512, 2048) self.output = nn.Linear(2048, dim_mod2) def forward(self, x): x = F.gelu(self.input_(x)) x = self.dropout1(x) x = F.gelu(self.fc(x)) x = self.dropout2(x) x = F.gelu(self.fc1(x)) x = self.dropout3(x) x = F.gelu(self.output(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_mod1': 4, 'dim_mod2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_gelu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 512), (512, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (2048, 512), (512, 1)) assert_size_stride(primals_7, (2048,), (1,)) assert_size_stride(primals_8, (4, 2048), (2048, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(32768)](buf0, buf1, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 512), (1, 512 ), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) triton_poi_fused_gelu_0[grid(32768)](buf2, buf3, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 2048), (1, 512), 0), 
alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32) triton_poi_fused_gelu_1[grid(131072)](buf4, buf5, 131072, XBLOCK= 512, num_warps=8, num_stages=1) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_gelu_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 512), (512, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 512), (512, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 2048), (2048, 1), 0 ), buf6, primals_8, primals_6, primals_4 class ModelRegressionGex2AdtNew(nn.Module): def __init__(self, dim_mod1, dim_mod2): super(ModelRegressionGex2AdtNew, self).__init__() self.input_ = nn.Linear(dim_mod1, 512) self.dropout1 = nn.Dropout(p=0.20335661386636347) self.dropout2 = nn.Dropout(p=0.15395289261127876) self.dropout3 = nn.Dropout(p=0.16902655078832815) self.fc = nn.Linear(512, 512) self.fc1 = nn.Linear(512, 2048) self.output = nn.Linear(2048, dim_mod2) def forward(self, input_0): primals_1 = self.input_.weight primals_2 = self.input_.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.output.weight primals_9 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Permoment-95/neurips2021_multimodal_topmethods
ModelRegressionGex2Adt
false
9479
[ "MIT" ]
0
017bc23b366a80ba9b1c2a47ea6c44124f77a7ca
https://github.com/Permoment-95/neurips2021_multimodal_topmethods/tree/017bc23b366a80ba9b1c2a47ea6c44124f77a7ca
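A note on the ModelRegressionGex2Adt pair above: as with the Adt2Gex record, the compiled call() is addmm plus fused erf-GELU kernels only; no dropout kernel appears, so the graph matches the eager module in eval mode, where the three nn.Dropout layers are the identity. Parity sketch, same assumptions as before:

import torch

# Parity holds in eval mode only, since the compiled graph drops the dropouts.
eager = ModelRegressionGex2Adt(4, 4).cuda().eval()
compiled = ModelRegressionGex2AdtNew(4, 4).cuda().eval()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x))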
ResnetBlock
import torch from torchvision.transforms import * import torch.nn as nn import torch.utils.data import torch.nn.functional as F import torch.utils.data.distributed def actvn(x): out = F.leaky_relu(x, 0.2) return out class ResnetBlock(nn.Module): def __init__(self, fin, fout, fhidden=None, is_bias=True): super().__init__() self.is_bias = is_bias self.learned_shortcut = fin != fout self.fin = fin self.fout = fout if fhidden is None: self.fhidden = min(fin, fout) else: self.fhidden = fhidden self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1) self.conv_1 = nn.Conv2d(self.fhidden, self.fout, 3, stride=1, padding=1, bias=is_bias) if self.learned_shortcut: self.conv_s = nn.Conv2d(self.fin, self.fout, 1, stride=1, padding=0, bias=False) def forward(self, x): x_s = self._shortcut(x) dx = self.conv_0(actvn(x)) dx = self.conv_1(actvn(dx)) out = x_s + 0.1 * dx return out def _shortcut(self, x): if self.learned_shortcut: x_s = self.conv_s(x) else: x_s = x return x_s def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'fin': 4, 'fout': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torchvision.transforms import * import torch.nn as nn import torch.utils.data import torch.nn.functional as F import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.1 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1, primals_3, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_add_convolution_mul_2[grid(256)](buf5, primals_1, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_5 return buf5, primals_2, primals_4, buf0, buf2, buf3 def actvn(x): 
out = F.leaky_relu(x, 0.2) return out class ResnetBlockNew(nn.Module): def __init__(self, fin, fout, fhidden=None, is_bias=True): super().__init__() self.is_bias = is_bias self.learned_shortcut = fin != fout self.fin = fin self.fout = fout if fhidden is None: self.fhidden = min(fin, fout) else: self.fhidden = fhidden self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1) self.conv_1 = nn.Conv2d(self.fhidden, self.fout, 3, stride=1, padding=1, bias=is_bias) if self.learned_shortcut: self.conv_s = nn.Conv2d(self.fin, self.fout, 1, stride=1, padding=0, bias=False) def _shortcut(self, x): if self.learned_shortcut: x_s = self.conv_s(x) else: x_s = x return x_s def forward(self, input_0): primals_2 = self.conv_0.weight primals_3 = self.conv_0.bias primals_4 = self.conv_1.weight primals_5 = self.conv_1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Minsoo2022/graf
ResnetBlock
false
9,480
[ "MIT" ]
0
e763dd4ef59db1695dfc4bfc7e3f716c92d480a8
https://github.com/Minsoo2022/graf/tree/e763dd4ef59db1695dfc4bfc7e3f716c92d480a8
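A small eager-mode usage sketch for the ResnetBlock record above, assuming ResnetBlock, get_inputs and get_init_inputs from that record are importable; the compiled ResnetBlockNew takes the same input but needs a CUDA device, so it is not exercised here:

import torch

init_args, init_kwargs = get_init_inputs()   # -> [[], {'fin': 4, 'fout': 4}]
block = ResnetBlock(*init_args, **init_kwargs)
x, = get_inputs()                            # a single (4, 4, 4, 4) tensor
with torch.no_grad():
    out = block(x)                           # x_s + 0.1 * conv_1(actvn(conv_0(actvn(x))))
# fin == fout, so the shortcut is the identity and the shape is preserved.
assert out.shape == x.shape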
SplAtConv1d
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Conv1d from torch.nn import ReLU from torch.nn.modules.utils import _single class DropBlock1d(object): def __init__(self, *args, **kwargs): raise NotImplementedError class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplAtConv1d(Module): """Split-Attention Conv1d """ def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, radix=2, reduction_factor=4, rectify=False, rectify_avg=False, norm_layer= None, dropblock_prob=0.0, **kwargs): super(SplAtConv1d, self).__init__() padding = _single(padding) self.rectify = rectify and padding[0] > 0 self.rectify_avg = rectify_avg inter_channels = max(in_channels * radix // reduction_factor, 32) self.radix = radix self.cardinality = groups self.channels = channels self.dropblock_prob = dropblock_prob if self.rectify: self.conv = RFConv1d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs) else: None self.conv = Conv1d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.use_bn = norm_layer is not None if self.use_bn: self.bn0 = norm_layer(channels * radix) self.relu = ReLU(inplace=True) self.fc1 = Conv1d(channels, inter_channels, 1, groups=self.cardinality) if self.use_bn: self.bn1 = norm_layer(inter_channels) self.fc2 = Conv1d(inter_channels, channels * radix, 1, groups=self. cardinality) if dropblock_prob > 0.0: self.dropblock = DropBlock1d(dropblock_prob, 3) self.rsoftmax = rSoftMax(radix, groups) def forward(self, x): x = self.conv(x) if self.use_bn: x = self.bn0(x) if self.dropblock_prob > 0.0: x = self.dropblock(x) x = self.relu(x) batch, rchannel = x.shape[:2] if self.radix > 1: if torch.__version__ < '1.5': splited = torch.split(x, int(rchannel // self.radix), dim=1) else: splited = torch.split(x, rchannel // self.radix, dim=1) gap = sum(splited) else: gap = x gap = F.adaptive_avg_pool1d(gap, 1) gap = self.fc1(gap) if self.use_bn: gap = self.bn1(gap) gap = self.relu(gap) atten = self.fc2(gap) atten = self.rsoftmax(atten).view(batch, -1, 1) if self.radix > 1: if torch.__version__ < '1.5': attens = torch.split(atten, int(rchannel // self.radix), dim=1) else: attens = torch.split(atten, rchannel // self.radix, dim=1) out = sum([(att * split) for att, split in zip(attens, splited)]) else: out = atten * x return out.contiguous() def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.nn as nn import torch.nn.functional as F from torch.nn import Conv1d from torch.nn import ReLU from torch.nn.modules.utils import _single assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 8 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp6 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask) tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 * tmp6 tmp8 = tmp4 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (8, 2, 4), (8, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (32, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (8, 32, 1), (32, 1, 1)) assert_size_stride(primals_7, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=2, bias=None) assert_size_stride(buf0, (4, 8, 1), (8, 1, 1)) buf1 = buf0 del buf0 buf9 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(32)](buf1, primals_2, buf9, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_poi_fused_mean_1[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 4, 1 ), (4, 1, 0), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 1), (32, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_2[grid(128)](buf4, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf5, (4, 8, 1), (8, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_3[grid(32)](buf6, primals_7, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_7 buf7 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(32)](buf6, buf7, 32, XBLOCK=32, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_add_mul_5[grid(16)](buf7, buf1, buf8, 16, XBLOCK= 16, num_warps=1, num_stages=1) return (buf8, primals_1, primals_3, primals_4, primals_6, reinterpret_tensor(buf1, (4, 4, 1), (8, 1, 1), 0), reinterpret_tensor(buf1, (4, 4, 1), (8, 1, 1), 4), reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0), buf4, buf6, reinterpret_tensor(buf7, (4, 4, 1), (8, 1, 1), 0), reinterpret_tensor(buf7, (4, 4, 1), (8, 1, 1), 4), buf9) class DropBlock1d(object): def __init__(self, *args, **kwargs): raise NotImplementedError class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplAtConv1dNew(Module): """Split-Attention Conv1d """ def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, radix=2, reduction_factor=4, 
rectify=False, rectify_avg=False, norm_layer= None, dropblock_prob=0.0, **kwargs): super(SplAtConv1dNew, self).__init__() padding = _single(padding) self.rectify = rectify and padding[0] > 0 self.rectify_avg = rectify_avg inter_channels = max(in_channels * radix // reduction_factor, 32) self.radix = radix self.cardinality = groups self.channels = channels self.dropblock_prob = dropblock_prob if self.rectify: self.conv = RFConv1d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs) else: None self.conv = Conv1d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.use_bn = norm_layer is not None if self.use_bn: self.bn0 = norm_layer(channels * radix) self.relu = ReLU(inplace=True) self.fc1 = Conv1d(channels, inter_channels, 1, groups=self.cardinality) if self.use_bn: self.bn1 = norm_layer(inter_channels) self.fc2 = Conv1d(inter_channels, channels * radix, 1, groups=self. cardinality) if dropblock_prob > 0.0: self.dropblock = DropBlock1d(dropblock_prob, 3) self.rsoftmax = rSoftMax(radix, groups) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Neronjust2017/challenge2020_test4
SplAtConv1d
false
9,481
[ "BSD-2-Clause" ]
0
6494107a459b563aa51f8ea75c580c17557b13af
https://github.com/Neronjust2017/challenge2020_test4/tree/6494107a459b563aa51f8ea75c580c17557b13af
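The rSoftMax reshaping above is the heart of the split-attention gate: the fc2 output is viewed as (batch, cardinality, radix, -1), the radix axis is moved to dim 1, and the softmax is taken over it, so the radix splits of each channel receive weights that sum to 1 before being recombined as sum(att * split). A CPU sketch of just that reshaping, using the radix=2, cardinality=1, channels=4 configuration of this record (variable names are illustrative):

import torch
import torch.nn.functional as F

radix, cardinality, channels, batch = 2, 1, 4, 4
gate = torch.randn(batch, channels * radix, 1)      # stands in for the fc2 output

# Same path as rSoftMax.forward for radix > 1.
att = gate.view(batch, cardinality, radix, -1).transpose(1, 2)
att = F.softmax(att, dim=1).reshape(batch, -1)

# The two radix splits of every channel now carry weights that sum to 1.
w = att.reshape(batch, radix, channels)
assert torch.allclose(w.sum(dim=1), torch.ones(batch, channels))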
CosineLinear
import math import torch import torch.nn as nn import torch.nn.functional as F class CosineLinear(nn.Module): def __init__(self, in_features, out_features, sigma=True): super(CosineLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.Tensor(out_features, in_features)) if sigma: self.sigma = nn.Parameter(torch.Tensor(1)) else: self.register_parameter('sigma', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.sigma is not None: self.sigma.data.fill_(1) def forward(self, input): out = F.linear(F.normalize(input, p=2, dim=1), F.normalize(self. weight, p=2, dim=1)) if self.sigma is not None: out = self.sigma * out return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(256)](primals_3, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, primals_2, primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf2 class CosineLinearNew(nn.Module): def __init__(self, in_features, out_features, sigma=True): super(CosineLinearNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.Tensor(out_features, in_features)) if sigma: self.sigma = nn.Parameter(torch.Tensor(1)) else: self.register_parameter('sigma', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.sigma is not None: self.sigma.data.fill_(1) def forward(self, input_0): primals_2 = self.weight primals_3 = self.sigma primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
QIU023/continual-learning-reproduce
CosineLinear
false
9,482
[ "MIT" ]
0
772faa6904b3488fa5deee14f03d86f3b3664a87
https://github.com/QIU023/continual-learning-reproduce/tree/772faa6904b3488fa5deee14f03d86f3b3664a87
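The two triton_poi_fused_div_* kernels above are the L2 normalisations (with the 1e-12 clamp of F.normalize) of the input's dim=1 slices and of the weight rows, and the final mul kernel applies sigma. The product of two L2-normalised factors is just a cosine similarity, which a short CPU sketch can confirm on a 2-D input (shapes here are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(8, 4)        # rows play the role of the dim=1 slices
w = torch.randn(4, 4)        # (out_features, in_features)

out = F.linear(F.normalize(x, p=2, dim=1), F.normalize(w, p=2, dim=1))

# Entry (i, j) is the cosine of the angle between x[i] and w[j].
ref = F.cosine_similarity(x.unsqueeze(1), w.unsqueeze(0), dim=-1)
assert torch.allclose(out, ref, atol=1e-6)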
SplitCosineLinear
import math import torch import torch.nn as nn import torch.nn.functional as F class CosineLinear(nn.Module): def __init__(self, in_features, out_features, sigma=True): super(CosineLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.Tensor(out_features, in_features)) if sigma: self.sigma = nn.Parameter(torch.Tensor(1)) else: self.register_parameter('sigma', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.sigma is not None: self.sigma.data.fill_(1) def forward(self, input): out = F.linear(F.normalize(input, p=2, dim=1), F.normalize(self. weight, p=2, dim=1)) if self.sigma is not None: out = self.sigma * out return out class SplitCosineLinear(nn.Module): def __init__(self, in_features, out_features1, out_features2, sigma=True): super(SplitCosineLinear, self).__init__() self.in_features = in_features self.out_features = out_features1 + out_features2 self.fc1 = CosineLinear(in_features, out_features1, False) self.fc2 = CosineLinear(in_features, out_features2, False) if sigma: self.sigma = nn.Parameter(torch.Tensor(1)) self.sigma.data.fill_(1) else: self.register_parameter('sigma', None) self.old_scores_hook = self.fc1.register_forward_hook(self. get_old_scores_before_scale_fn) self.new_scores_hook = self.fc2.register_forward_hook(self. get_new_scores_before_scale_fn) def get_old_scores_before_scale_fn(self, module, inputs, outputs): self.old_scores = outputs def get_new_scores_before_scale_fn(self, module, inputs, outputs): self.new_scores = outputs def get_old_scores(self): return self.old_scores def get_new_scores(self): return self.new_scores def forward(self, x): out1 = self.fc1(x) out2 = self.fc2(x) out = torch.cat((out1, out2), dim=1) if self.sigma is not None: out = self.sigma * out return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features1': 4, 'out_features2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_cat_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp11 = tl.load(in_ptr2 + 0) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp13 = tmp12 * tmp10 tl.store(out_ptr0 + x3, tmp10, xmask) tl.store(out_ptr1 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 
1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = buf1 del buf1 triton_poi_fused_div_1[grid(16)](primals_3, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 4), (1, 4), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_mul_2[grid(512)](buf2, buf4, primals_4, buf5, buf6, 512, XBLOCK=256, num_warps=4, num_stages=1) return buf6, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, primals_3, primals_4, reinterpret_tensor(buf0, (64, 4 ), (4, 1), 0), buf5 class CosineLinear(nn.Module): def __init__(self, in_features, out_features, sigma=True): super(CosineLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.Tensor(out_features, in_features)) if sigma: self.sigma = nn.Parameter(torch.Tensor(1)) else: self.register_parameter('sigma', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.sigma is not None: self.sigma.data.fill_(1) def forward(self, input): out = F.linear(F.normalize(input, p=2, dim=1), F.normalize(self. weight, p=2, dim=1)) if self.sigma is not None: out = self.sigma * out return out class SplitCosineLinearNew(nn.Module): def __init__(self, in_features, out_features1, out_features2, sigma=True): super(SplitCosineLinearNew, self).__init__() self.in_features = in_features self.out_features = out_features1 + out_features2 self.fc1 = CosineLinear(in_features, out_features1, False) self.fc2 = CosineLinear(in_features, out_features2, False) if sigma: self.sigma = nn.Parameter(torch.Tensor(1)) self.sigma.data.fill_(1) else: self.register_parameter('sigma', None) self.old_scores_hook = self.fc1.register_forward_hook(self. get_old_scores_before_scale_fn) self.new_scores_hook = self.fc2.register_forward_hook(self. get_new_scores_before_scale_fn) def get_old_scores_before_scale_fn(self, module, inputs, outputs): self.old_scores = outputs def get_new_scores_before_scale_fn(self, module, inputs, outputs): self.new_scores = outputs def get_old_scores(self): return self.old_scores def get_new_scores(self): return self.new_scores def forward(self, input_0): primals_4 = self.sigma primals_2 = self.fc1.weight primals_3 = self.fc2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
QIU023/continual-learning-reproduce
SplitCosineLinear
false
9,483
[ "MIT" ]
0
772faa6904b3488fa5deee14f03d86f3b3664a87
https://github.com/QIU023/continual-learning-reproduce/tree/772faa6904b3488fa5deee14f03d86f3b3664a87
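A quick eager-mode check that the split head is just the two cosine branches concatenated and rescaled by the shared sigma, assuming CosineLinear and SplitCosineLinear from the record above are in scope (seed and shapes are illustrative):

import torch

torch.manual_seed(0)
head = SplitCosineLinear(in_features=4, out_features1=4, out_features2=4)
x = torch.rand(4, 4, 4, 4)

out = head(x)
# forward() computes sigma * cat(fc1(x), fc2(x), dim=1); rebuild it by hand.
ref = head.sigma * torch.cat((head.fc1(x), head.fc2(x)), dim=1)
assert torch.allclose(out, ref)
assert out.shape == (4, 8, 4, 4)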
SoftTargetCrossEntropy
import torch from typing import * import torch.nn.functional as F from torch.nn.modules.loss import _WeightedLoss class SoftTargetCrossEntropy(_WeightedLoss): def __init__(self, weight=None, reduction='mean'): super().__init__(weight=weight, reduction=reduction) self.weight = weight self.reduction = reduction def forward(self, inputs, targets): lsm = F.log_softmax(inputs, -1) if self.weight is not None: lsm = lsm * self.weight.unsqueeze(0) loss = -(targets * lsm).sum(-1) if self.reduction == 'sum': loss = loss.sum() elif self.reduction == 'mean': loss = loss.mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from typing import * from torch.nn.modules.loss import _WeightedLoss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp1 - tmp12 tmp14 = tmp0 * tmp13 tmp16 = tmp3 - tmp12 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp20 = tmp6 - tmp12 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp24 = tmp9 - tmp12 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tmp31 = 64.0 tmp32 = tmp30 / tmp31 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf2, arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class 
SoftTargetCrossEntropyNew(_WeightedLoss): def __init__(self, weight=None, reduction='mean'): super().__init__(weight=weight, reduction=reduction) self.weight = weight self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
SuHuynh/leaf-disease-classification-kaggle
SoftTargetCrossEntropy
false
9,484
[ "MIT" ]
0
b1c15881de5a20e590a69f6b2fbb476b003bc077
https://github.com/SuHuynh/leaf-disease-classification-kaggle/tree/b1c15881de5a20e590a69f6b2fbb476b003bc077
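The two kernels above fuse the log-softmax with the weighted sum and the mean for the default reduction='mean'. A CPU check of the underlying math only (not of the kernels): with one-hot targets, the soft-target form collapses to the ordinary cross-entropy.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(8, 5)
labels = torch.randint(0, 5, (8,))
one_hot = F.one_hot(labels, num_classes=5).float()

# Soft-target form used by the module above, with reduction='mean'.
soft_loss = -(one_hot * F.log_softmax(logits, -1)).sum(-1).mean()

assert torch.allclose(soft_loss, F.cross_entropy(logits, labels))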
PointWiseConvolution
import torch from torch import nn as nn class PointWiseConvolution(nn.Module): def __init__(self, inChannels, outChannels, stride, expansionFactor, isNormal): super(PointWiseConvolution, self).__init__() if isNormal: self.layer = nn.Conv2d(in_channels=inChannels * expansionFactor, out_channels=outChannels, kernel_size=1, stride=stride, bias=True) else: self.layer = nn.Conv2d(in_channels=inChannels, out_channels= inChannels * expansionFactor, kernel_size=1, stride=stride, bias=True) def forward(self, x): return self.layer(x) def get_inputs(): return [torch.rand([4, 16, 64, 64])] def get_init_inputs(): return [[], {'inChannels': 4, 'outChannels': 4, 'stride': 1, 'expansionFactor': 4, 'isNormal': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(65536)](buf1, primals_2, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class PointWiseConvolutionNew(nn.Module): def __init__(self, inChannels, outChannels, stride, expansionFactor, isNormal): super(PointWiseConvolutionNew, self).__init__() if isNormal: self.layer = nn.Conv2d(in_channels=inChannels * expansionFactor, out_channels=outChannels, kernel_size=1, stride=stride, bias=True) else: self.layer = nn.Conv2d(in_channels=inChannels, out_channels= inChannels * expansionFactor, kernel_size=1, stride=stride, bias=True) def forward(self, input_0): primals_1 = self.layer.weight primals_2 = self.layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Pranshu-Bahadur/g2net
PointWiseConvolution
false
9,485
[ "MIT" ]
0
a117df7699837c9a3ae21ec59a310d7384369601
https://github.com/Pranshu-Bahadur/g2net/tree/a117df7699837c9a3ae21ec59a310d7384369601
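Only the bias add is fused here; the 1x1 convolution itself goes through extern_kernels.convolution. Semantically a pointwise convolution is a per-pixel linear map over channels, which a short CPU sketch can illustrate (the layer sizes are illustrative and not tied to the record's isNormal=4 arguments):

import torch
import torch.nn as nn

torch.manual_seed(0)
conv = nn.Conv2d(16, 4, kernel_size=1, stride=1, bias=True)
lin = nn.Linear(16, 4)
with torch.no_grad():
    lin.weight.copy_(conv.weight.view(4, 16))   # (out_ch, in_ch, 1, 1) -> (out_ch, in_ch)
    lin.bias.copy_(conv.bias)

x = torch.randn(2, 16, 8, 8)
ref = lin(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
assert torch.allclose(conv(x), ref, atol=1e-5)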
ConvLayer
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter from itertools import product as product class NormLayer(nn.Module): """Normalization Layers. Args: channels: input channels, for batch norm and instance norm. input_size: input shape without batch size, for layer norm. """ def __init__(self, channels, normalize_shape=None, norm_type='bn'): super(NormLayer, self).__init__() norm_type = norm_type.lower() self.norm_type = norm_type if norm_type == 'bn': self.norm = nn.BatchNorm2d(channels, affine=True) elif norm_type == 'in': self.norm = nn.InstanceNorm2d(channels, affine=False) elif norm_type == 'gn': self.norm = nn.GroupNorm(32, channels, affine=True) elif norm_type == 'pixel': self.norm = lambda x: F.normalize(x, p=2, dim=1) elif norm_type == 'layer': self.norm = nn.LayerNorm(normalize_shape) elif norm_type == 'none': self.norm = lambda x: x * 1.0 else: assert 1 == 0, f'Norm type {norm_type} not support.' def forward(self, x, ref=None): if self.norm_type == 'spade': return self.norm(x, ref) else: return self.norm(x) class ReluLayer(nn.Module): """Relu Layer. Args: relu type: type of relu layer, candidates are - ReLU - LeakyReLU: default relu slope 0.2 - PRelu - SELU - none: direct pass """ def __init__(self, channels, relu_type='relu'): super(ReluLayer, self).__init__() relu_type = relu_type.lower() if relu_type == 'relu': self.func = nn.ReLU(True) elif relu_type == 'leakyrelu': self.func = nn.LeakyReLU(0.2, inplace=True) elif relu_type == 'prelu': self.func = nn.PReLU(channels) elif relu_type == 'selu': self.func = nn.SELU(True) elif relu_type == 'none': self.func = lambda x: x * 1.0 else: assert 1 == 0, f'Relu type {relu_type} not support.' def forward(self, x): return self.func(x) class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, scale= 'none', norm_type='none', relu_type='none', use_pad=True, bias=True): super(ConvLayer, self).__init__() self.use_pad = use_pad self.norm_type = norm_type if norm_type in ['bn']: bias = False stride = 2 if scale == 'down' else 1 self.scale_func = lambda x: x if scale == 'up': self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest') self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.0) / 2))) self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias) self.relu = ReluLayer(out_channels, relu_type) self.norm = NormLayer(out_channels, norm_type=norm_type) def forward(self, x): out = self.scale_func(x) if self.use_pad: out = self.reflection_pad(out) out = self.conv2d(out) out = self.norm(out) out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.nn.functional as F from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp3 tl.store(in_out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_mul_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class NormLayer(nn.Module): """Normalization Layers. Args: channels: input channels, for batch norm and instance norm. input_size: input shape without batch size, for layer norm. """ def __init__(self, channels, normalize_shape=None, norm_type='bn'): super(NormLayer, self).__init__() norm_type = norm_type.lower() self.norm_type = norm_type if norm_type == 'bn': self.norm = nn.BatchNorm2d(channels, affine=True) elif norm_type == 'in': self.norm = nn.InstanceNorm2d(channels, affine=False) elif norm_type == 'gn': self.norm = nn.GroupNorm(32, channels, affine=True) elif norm_type == 'pixel': self.norm = lambda x: F.normalize(x, p=2, dim=1) elif norm_type == 'layer': self.norm = nn.LayerNorm(normalize_shape) elif norm_type == 'none': self.norm = lambda x: x * 1.0 else: assert 1 == 0, f'Norm type {norm_type} not support.' def forward(self, x, ref=None): if self.norm_type == 'spade': return self.norm(x, ref) else: return self.norm(x) class ReluLayer(nn.Module): """Relu Layer. 
Args: relu type: type of relu layer, candidates are - ReLU - LeakyReLU: default relu slope 0.2 - PRelu - SELU - none: direct pass """ def __init__(self, channels, relu_type='relu'): super(ReluLayer, self).__init__() relu_type = relu_type.lower() if relu_type == 'relu': self.func = nn.ReLU(True) elif relu_type == 'leakyrelu': self.func = nn.LeakyReLU(0.2, inplace=True) elif relu_type == 'prelu': self.func = nn.PReLU(channels) elif relu_type == 'selu': self.func = nn.SELU(True) elif relu_type == 'none': self.func = lambda x: x * 1.0 else: assert 1 == 0, f'Relu type {relu_type} not support.' def forward(self, x): return self.func(x) class ConvLayerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, scale= 'none', norm_type='none', relu_type='none', use_pad=True, bias=True): super(ConvLayerNew, self).__init__() self.use_pad = use_pad self.norm_type = norm_type if norm_type in ['bn']: bias = False stride = 2 if scale == 'down' else 1 self.scale_func = lambda x: x if scale == 'up': self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest') self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.0) / 2))) self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias) self.relu = ReluLayer(out_channels, relu_type) self.norm = NormLayer(out_channels, norm_type=norm_type) def forward(self, input_0): primals_2 = self.conv2d.weight primals_3 = self.conv2d.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Cospel/facexlib
ConvLayer
false
9,486
[ "MIT" ]
0
2471ddb44b1d61306c6d7fcf56846b9e4aeea4aa
https://github.com/Cospel/facexlib/tree/2471ddb44b1d61306c6d7fcf56846b9e4aeea4aa
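The reflection_pad2d kernel above encodes the mirror lookup for padding a 4x4 map by int(np.ceil((3 - 1.0) / 2)) = 1 on each side: for output position (x1, x0) it reads input position (3 - abs(3 - abs(x1 - 1)), 3 - abs(3 - abs(x0 - 1))). A small CPU sketch of that correspondence against nn.ReflectionPad2d:

import torch
import torch.nn as nn

x = torch.arange(16.0).view(1, 1, 4, 4)
y = nn.ReflectionPad2d(1)(x)                 # (1, 1, 6, 6)

# Column lookup used by the fused kernel, written out for the 6 output columns.
col = torch.tensor([3 - abs(3 - abs(i - 1)) for i in range(6)])   # [1, 0, 1, 2, 3, 2]
# Output row 1 mirrors input row 0, padded with the same column pattern.
assert torch.equal(y[0, 0, 1], x[0, 0, 0, col])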
SelfExpression
import torch import torch.nn as nn class SelfExpression(nn.Module): def __init__(self, n): super(SelfExpression, self).__init__() self.Coefficient = nn.Parameter(0.0001 * torch.ones(n, n, dtype= torch.float32), requires_grad=True) def forward(self, x): y = torch.matmul(self.Coefficient, x) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_2, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1) del primals_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(64, 4)](buf1, buf2, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class SelfExpressionNew(nn.Module): def __init__(self, n): super(SelfExpressionNew, self).__init__() self.Coefficient = nn.Parameter(0.0001 * torch.ones(n, n, dtype= torch.float32), requires_grad=True) def forward(self, input_0): primals_1 = self.Coefficient primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ShulingTang/DSC-Net
SelfExpression
false
9,487
[ "MIT" ]
0
2da1e0c654b045057c654cbcbb8a8c23fb832c9d
https://github.com/ShulingTang/DSC-Net/tree/2da1e0c654b045057c654cbcbb8a8c23fb832c9d
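The two clone kernels above appear to only shuffle layout so the (n, n) coefficient matrix can be applied with a plain mm; the semantics are the broadcasted matmul of the original forward, where every trailing (n, n) slice of x is multiplied on the left by the coefficient matrix. A brief CPU illustration:

import torch

n = 4
C = 0.0001 * torch.ones(n, n)
x = torch.rand(4, 4, n, n)

y = torch.matmul(C, x)          # broadcasts C over the two leading batch dims
assert y.shape == x.shape
assert torch.allclose(y[2, 3], C @ x[2, 3])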
ModelRegressionGex2Atac
import torch import torch.utils.data import torch.nn.functional as F import torch.nn as nn class ModelRegressionGex2Atac(nn.Module): def __init__(self, dim_mod1, dim_mod2): super(ModelRegressionGex2Atac, self).__init__() self.input_ = nn.Linear(dim_mod1, 1024) self.fc = nn.Linear(1024, 256) self.fc1 = nn.Linear(256, 2048) self.dropout1 = nn.Dropout(p=0.298885630228993) self.dropout2 = nn.Dropout(p=0.11289717442776658) self.dropout3 = nn.Dropout(p=0.13523634924414762) self.output = nn.Linear(2048, dim_mod2) def forward(self, x): x = F.gelu(self.input_(x)) x = self.dropout1(x) x = F.gelu(self.fc(x)) x = self.dropout2(x) x = F.gelu(self.fc1(x)) x = self.dropout3(x) x = F.gelu(self.output(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_mod1': 4, 'dim_mod2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_gelu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (1024, 4), (4, 1)) assert_size_stride(primals_2, (1024,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 1024), (1024, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (2048, 256), (256, 1)) assert_size_stride(primals_7, (2048,), (1,)) assert_size_stride(primals_8, (4, 2048), (2048, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(65536)](buf0, buf1, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0), reinterpret_tensor(primals_4, 
(1024, 256), (1, 1024), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.float32) triton_poi_fused_gelu_1[grid(16384)](buf2, buf3, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 2048), (1, 256), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32) triton_poi_fused_gelu_2[grid(131072)](buf4, buf5, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_gelu_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 256), (256, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 2048), (2048, 1), 0 ), buf6, primals_8, primals_6, primals_4 class ModelRegressionGex2AtacNew(nn.Module): def __init__(self, dim_mod1, dim_mod2): super(ModelRegressionGex2AtacNew, self).__init__() self.input_ = nn.Linear(dim_mod1, 1024) self.fc = nn.Linear(1024, 256) self.fc1 = nn.Linear(256, 2048) self.dropout1 = nn.Dropout(p=0.298885630228993) self.dropout2 = nn.Dropout(p=0.11289717442776658) self.dropout3 = nn.Dropout(p=0.13523634924414762) self.output = nn.Linear(2048, dim_mod2) def forward(self, input_0): primals_1 = self.input_.weight primals_2 = self.input_.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.output.weight primals_9 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Permoment-95/neurips2021_multimodal_topmethods
ModelRegressionGex2Atac
false
9,488
[ "MIT" ]
0
017bc23b366a80ba9b1c2a47ea6c44124f77a7ca
https://github.com/Permoment-95/neurips2021_multimodal_topmethods/tree/017bc23b366a80ba9b1c2a47ea6c44124f77a7ca
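None of the three dropout layers defined on the module appear in the compiled call() above, which is consistent with the graph having been captured with dropout inactive (eval mode, or training=False); an nn.Dropout in that state is the identity, as a one-line CPU check shows (the probability is the record's p for dropout1):

import torch
import torch.nn as nn

drop = nn.Dropout(p=0.298885630228993)
drop.eval()                      # inference: dropout passes the input through unchanged
x = torch.randn(4, 1024)
assert torch.equal(drop(x), x)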
ModelRegressionAtac2Gex
import torch import torch.utils.data import torch.nn.functional as F import torch.nn as nn class ModelRegressionAtac2Gex(nn.Module): def __init__(self, dim_mod1, dim_mod2): super(ModelRegressionAtac2Gex, self).__init__() self.input_ = nn.Linear(dim_mod1, 2048) self.fc = nn.Linear(2048, 2048) self.fc1 = nn.Linear(2048, 512) self.dropout1 = nn.Dropout(p=0.2649138776004753) self.dropout2 = nn.Dropout(p=0.1769628308148758) self.dropout3 = nn.Dropout(p=0.2516791883012817) self.output = nn.Linear(512, dim_mod2) def forward(self, x): x = F.gelu(self.input_(x)) x = self.dropout1(x) x = F.gelu(self.fc(x)) x = self.dropout2(x) x = F.gelu(self.fc1(x)) x = self.dropout3(x) x = F.gelu(self.output(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_mod1': 4, 'dim_mod2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_gelu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (2048, 4), (4, 1)) assert_size_stride(primals_2, (2048,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2048, 2048), (2048, 1)) assert_size_stride(primals_5, (2048,), (1,)) assert_size_stride(primals_6, (512, 2048), (2048, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (4, 512), (512, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(131072)](buf0, buf1, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32) triton_poi_fused_gelu_0[grid(131072)](buf2, buf3, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_6, 
(2048, 512), (1, 2048), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) triton_poi_fused_gelu_1[grid(32768)](buf4, buf5, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_gelu_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 512), (512, 1), 0 ), buf6, primals_8, primals_6, primals_4 class ModelRegressionAtac2GexNew(nn.Module): def __init__(self, dim_mod1, dim_mod2): super(ModelRegressionAtac2GexNew, self).__init__() self.input_ = nn.Linear(dim_mod1, 2048) self.fc = nn.Linear(2048, 2048) self.fc1 = nn.Linear(2048, 512) self.dropout1 = nn.Dropout(p=0.2649138776004753) self.dropout2 = nn.Dropout(p=0.1769628308148758) self.dropout3 = nn.Dropout(p=0.2516791883012817) self.output = nn.Linear(512, dim_mod2) def forward(self, input_0): primals_1 = self.input_.weight primals_2 = self.input_.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.output.weight primals_9 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Permoment-95/neurips2021_multimodal_topmethods
ModelRegressionAtac2Gex
false
9,489
[ "MIT" ]
0
017bc23b366a80ba9b1c2a47ea6c44124f77a7ca
https://github.com/Permoment-95/neurips2021_multimodal_topmethods/tree/017bc23b366a80ba9b1c2a47ea6c44124f77a7ca
D_DownBlock
import torch from torchvision.transforms import * class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class D_DownBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None): super(D_DownBlock, self).__init__() self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0, activation, norm=None) self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, x): x = self.conv(x) l0 = self.down_conv1(x) h0 = self.down_conv2(l0) l1 = self.down_conv3(h0 - x) return l1 + l0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_filter': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_6, (4,), 
(1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_1[grid(16)](buf4, primals_6, primals_7, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_sub_2[grid(256)](buf7, primals_9, primals_10, buf2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf9 = extern_kernels.convolution(buf8, primals_11, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 1, 1), (4, 1, 1, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused__prelu_kernel_add_convolution_3[grid(16)](buf10, primals_12, primals_13, buf5, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_12 return (buf11, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, primals_11, primals_13, buf1, buf2, buf4, buf5, buf7, buf8, buf10) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): 
super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class D_DownBlockNew(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None): super(D_DownBlockNew, self).__init__() self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0, activation, norm=None) self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, input_0): primals_1 = self.conv.conv.weight primals_2 = self.conv.conv.bias primals_4 = self.conv.act.weight primals_5 = self.down_conv1.conv.weight primals_6 = self.down_conv1.conv.bias primals_7 = self.down_conv1.act.weight primals_8 = self.down_conv2.deconv.weight primals_9 = self.down_conv2.deconv.bias primals_10 = self.down_conv2.act.weight primals_11 = self.down_conv3.conv.weight primals_12 = self.down_conv3.conv.bias primals_13 = self.down_conv3.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
EvgeneyZ/RBPN
D_DownBlock
false
9,490
[ "MIT" ]
0
acfe636cc48a4fbfea78f934a251c32e53367659
https://github.com/EvgeneyZ/RBPN/tree/acfe636cc48a4fbfea78f934a251c32e53367659
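On the D_DownBlock entry above: the _prelu_kernel fusions fold the convolution bias add and the single-weight PReLU into one elementwise pass, with kernel 2 also subtracting the block input (h0 - x) and kernel 3 adding the first down-convolution output (l1 + l0). Below is a small sketch of the per-element math, assuming the default scalar PReLU weight; fused_bias_prelu is an illustrative helper, not part of the entry.

import torch


def fused_bias_prelu(x, bias, a):
    # tmp2 = x + bias (per channel); tmp8 = where(tmp2 > 0, tmp2, a * tmp2)
    y = x + bias.view(1, -1, 1, 1)
    return torch.where(y > 0, y, a * y)


x = torch.randn(4, 4, 4, 4)
bias = torch.randn(4)
a = torch.tensor(0.25)  # nn.PReLU() default slope init
ref = torch.nn.functional.prelu(x + bias.view(1, -1, 1, 1), a.view(1))
assert torch.allclose(fused_bias_prelu(x, bias, a), ref)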
BaselineEstimator
import torch
import torch.nn.functional as F
from torch import nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler


class BaselineEstimator(nn.Module):

    def __init__(self, input_size):
        super(BaselineEstimator, self).__init__()
        self.ff1 = nn.Linear(input_size, input_size * 4)
        self.ff2 = nn.Linear(input_size * 4, 1)

    def forward(self, input, mean=False):
        input = input.detach()
        if mean:
            input = input.mean(axis=0)
        out = self.ff1(input)
        out = F.relu(out)
        out = self.ff2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data import torch.onnx.operators import torch.optim import torch.optim.lr_scheduler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (1, 16), (16, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_3, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 1), (1, 16), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf4 class BaselineEstimatorNew(nn.Module): def __init__(self, input_size): super(BaselineEstimatorNew, self).__init__() self.ff1 = nn.Linear(input_size, input_size * 4) self.ff2 = nn.Linear(input_size * 4, 1) def forward(self, input_0): primals_2 = self.ff1.weight primals_3 = self.ff1.bias primals_4 = self.ff2.weight primals_5 = self.ff2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
StDario/fairseq-rl
BaselineEstimator
false
9,491
[ "BSD-3-Clause" ]
0
96a0ee4db1a2d1781d565a2539c20ed392dfb608
https://github.com/StDario/fairseq-rl/tree/96a0ee4db1a2d1781d565a2539c20ed392dfb608
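On the BaselineEstimator entry above: triton_poi_fused_relu_threshold_backward_0 adds the first linear layer's bias, applies ReLU in place (buf1), and also stores the boolean mask of clamped positions (buf4) that ReLU's backward reuses. A hedged sketch of that bookkeeping in plain PyTorch; relu_with_backward_mask is an illustrative name.

import torch


def relu_with_backward_mask(x, bias):
    # bias add + ReLU (buf1) plus the <= 0 mask kept for the backward pass (buf4)
    y = torch.relu(x + bias)
    return y, y <= 0.0


x = torch.randn(64, 16)
bias = torch.zeros(16)
act, mask = relu_with_backward_mask(x, bias)
assert mask.dtype == torch.bool
assert float(act.min()) >= 0.0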
CMlp
import torch
import torch.nn as nn


class CMlp(nn.Module):

    def __init__(self, in_features, hidden_features=None, out_features=None,
            act_layer=nn.GELU, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_gelu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865476 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_gelu_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(256)](buf4, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf4, primals_1, primals_3, primals_4, buf1, buf2 class CMlpNew(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Conv2d(in_features, hidden_features, 1) self.act = act_layer() self.fc2 = nn.Conv2d(hidden_features, out_features, 1) self.drop = nn.Dropout(drop) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
SteveTsui/DS-Net
CMlp
false
9,492
[ "Apache-2.0" ]
0
c54585e7af40002178b7e06fc3ee09160e0d775c
https://github.com/SteveTsui/DS-Net/tree/c54585e7af40002178b7e06fc3ee09160e0d775c
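On the CMlp entry above: fc1 and fc2 are 1x1 convolutions, i.e. per-pixel linear layers over the channel dimension, so the compiled graph is two convolutions with the bias add and GELU folded into the first elementwise kernel and only the bias add into the second. A small equivalence sketch, illustrative only, with arbitrarily chosen shapes:

import torch

conv = torch.nn.Conv2d(4, 4, kernel_size=1)
x = torch.randn(2, 4, 8, 8)
# A 1x1 convolution is a per-pixel linear layer over the channel dimension.
lin = torch.nn.functional.linear(x.permute(0, 2, 3, 1),
                                 conv.weight.view(4, 4), conv.bias)
assert torch.allclose(conv(x), lin.permute(0, 3, 1, 2), atol=1e-5)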
HardTripletLoss
import torch import torch.nn as nn import torch.nn.functional as F def _get_anchor_negative_triplet_mask(labels): labels_equal = torch.unsqueeze(labels, 0) == torch.unsqueeze(labels, 1) mask = labels_equal ^ 1 return mask def _get_anchor_positive_triplet_mask(labels): torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') indices_not_equal = torch.eye(labels.shape[0]).byte() ^ 1 labels_equal = torch.unsqueeze(labels, 0) == torch.unsqueeze(labels, 1) mask = indices_not_equal * labels_equal return mask def _get_triplet_mask(labels): torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') indices_equal = torch.eye(labels.size(0), dtype=torch.bool, device= labels.device) indices_not_equal = ~indices_equal i_not_equal_j = indices_not_equal.unsqueeze(2) i_not_equal_k = indices_not_equal.unsqueeze(1) j_not_equal_k = indices_not_equal.unsqueeze(0) distinct_indices = i_not_equal_j & i_not_equal_k & j_not_equal_k label_equal = labels.unsqueeze(0) == labels.unsqueeze(1) i_equal_j = label_equal.unsqueeze(2) i_equal_k = label_equal.unsqueeze(1) valid_labels = ~i_equal_k & i_equal_j mask = valid_labels & distinct_indices return mask def _pairwise_distance(x, squared=False, eps=1e-16): cor_mat = torch.matmul(x, x.t()) norm_mat = cor_mat.diag() distances = norm_mat.unsqueeze(1) - 2 * cor_mat + norm_mat.unsqueeze(0) distances = F.relu(distances) if not squared: mask = torch.eq(distances, 0.0).float() distances = distances + mask * eps distances = torch.sqrt(distances) distances = distances * (1.0 - mask) return distances class HardTripletLoss(nn.Module): """Hard/Hardest Triplet Loss (pytorch implementation of https://omoindrot.github.io/triplet-loss) For each anchor, we get the hardest positive and hardest negative to form a triplet. """ def __init__(self, margin=0.1, hardest=False, squared=False): """ Args: margin: margin for triplet loss hardest: If true, loss is considered only hardest triplets. squared: If true, output is the pairwise squared euclidean distance matrix. If false, output is the pairwise euclidean distance matrix. 
""" super(HardTripletLoss, self).__init__() self.margin = margin self.hardest = hardest self.squared = squared def forward(self, embeddings, labels): """ Args: labels: labels of the batch, of size (batch_size,) embeddings: tensor of shape (batch_size, embed_dim) Returns: triplet_loss: scalar tensor containing the triplet loss """ pairwise_dist = _pairwise_distance(embeddings, squared=self.squared) if self.hardest: mask_anchor_positive = _get_anchor_positive_triplet_mask(labels ).float() valid_positive_dist = pairwise_dist * mask_anchor_positive hardest_positive_dist, _ = torch.max(valid_positive_dist, dim=1, keepdim=True) mask_anchor_negative = _get_anchor_negative_triplet_mask(labels ).float() max_anchor_negative_dist, _ = torch.max(pairwise_dist, dim=1, keepdim=True) anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * ( 1.0 - mask_anchor_negative) hardest_negative_dist, _ = torch.min(anchor_negative_dist, dim= 1, keepdim=True) triplet_loss = F.relu(hardest_positive_dist - hardest_negative_dist + 0.1) triplet_loss = torch.mean(triplet_loss) else: anc_pos_dist = pairwise_dist.unsqueeze(dim=2) anc_neg_dist = pairwise_dist.unsqueeze(dim=1) loss = anc_pos_dist - anc_neg_dist + self.margin mask = _get_triplet_mask(labels).float() triplet_loss = loss * mask triplet_loss = F.relu(triplet_loss) hard_triplets = torch.gt(triplet_loss, 1e-16).float() num_hard_triplets = torch.sum(hard_triplets) triplet_loss = torch.sum(triplet_loss) / (num_hard_triplets + 1e-16 ) return triplet_loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x4 = xindex // 4 x1 = xindex // 4 % 4 x0 = xindex % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + 5 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + 5 * x1, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 - tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 tmp10 = tmp8 == tmp9 tmp11 = tmp10.to(tl.float32) tmp12 = 1e-16 tmp13 = tmp11 * tmp12 tmp14 = tmp8 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = 1.0 tmp17 = tmp16 - tmp11 tmp18 = tmp15 * tmp17 tmp20 = tmp19 * tmp2 tmp21 = tmp0 - tmp20 tmp23 = tmp21 + tmp22 tmp24 = triton_helpers.maximum(tmp7, tmp23) tmp25 = tmp24 == tmp9 tmp26 = tmp25.to(tl.float32) tmp27 = tmp26 * tmp12 tmp28 = tmp24 + tmp27 tmp29 = libdevice.sqrt(tmp28) tmp30 = tmp16 - tmp26 tmp31 = tmp29 * tmp30 tmp32 = tmp18 - tmp31 tl.store(out_ptr0 + x5, tmp32, xmask) @triton.jit def triton_red_fused__to_copy_add_bitwise_and_bitwise_not_div_gt_mul_relu_sum_1( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp33 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r7 = rindex % 64 r9 = rindex % 256 r5 = rindex // 1024 r4 = rindex // 256 % 4 r2 = rindex // 16 % 4 r1 = rindex // 4 % 4 r0 = rindex % 4 tmp0 = tl.load(in_ptr0 + r7, rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + r9, rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr1 + (r7 + 64 * r5), rmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tl.load(in_ptr1 + (r7 + 64 * r4), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.1 tmp2 = tmp0 + tmp1 tmp5 = tmp3 == tmp4 tmp6 = tmp5 == 0 tmp8 = tmp7 == tmp4 tmp9 = tmp6 & tmp8 tmp10 = r2 tmp11 = r1 tmp12 = tmp10 == tmp11 tmp13 = tmp12 == 0 tmp14 = r0 tmp15 = tmp10 == tmp14 tmp16 = tmp15 == 0 tmp17 = tmp13 & tmp16 tmp18 = tmp11 == tmp14 tmp19 = tmp18 == 0 tmp20 = tmp17 & tmp19 tmp21 = tmp9 & tmp20 tmp22 = tmp21.to(tl.float32) tmp23 = tmp2 * tmp22 tmp24 = tl.full([1, 1], 0, tl.int32) tmp25 = triton_helpers.maximum(tmp24, tmp23) tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask, tmp28, _tmp27) tmp29 = 1e-16 tmp30 = tmp25 > tmp29 
tmp31 = tmp30.to(tl.float32) tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK]) tmp34 = _tmp33 + tmp32 _tmp33 = tl.where(rmask, tmp34, _tmp33) tmp27 = tl.sum(_tmp27, 1)[:, None] tmp33 = tl.sum(_tmp33, 1)[:, None] tmp35 = 1e-16 tmp36 = tmp33 + tmp35 tmp37 = tmp27 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 triton_red_fused__to_copy_add_bitwise_and_bitwise_not_div_gt_mul_relu_sum_1[ grid(1)](buf4, buf1, arg1_1, 1, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del arg1_1 del buf1 return buf4, def _get_anchor_negative_triplet_mask(labels): labels_equal = torch.unsqueeze(labels, 0) == torch.unsqueeze(labels, 1) mask = labels_equal ^ 1 return mask def _get_anchor_positive_triplet_mask(labels): torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') indices_not_equal = torch.eye(labels.shape[0]).byte() ^ 1 labels_equal = torch.unsqueeze(labels, 0) == torch.unsqueeze(labels, 1) mask = indices_not_equal * labels_equal return mask def _get_triplet_mask(labels): torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') indices_equal = torch.eye(labels.size(0), dtype=torch.bool, device= labels.device) indices_not_equal = ~indices_equal i_not_equal_j = indices_not_equal.unsqueeze(2) i_not_equal_k = indices_not_equal.unsqueeze(1) j_not_equal_k = indices_not_equal.unsqueeze(0) distinct_indices = i_not_equal_j & i_not_equal_k & j_not_equal_k label_equal = labels.unsqueeze(0) == labels.unsqueeze(1) i_equal_j = label_equal.unsqueeze(2) i_equal_k = label_equal.unsqueeze(1) valid_labels = ~i_equal_k & i_equal_j mask = valid_labels & distinct_indices return mask def _pairwise_distance(x, squared=False, eps=1e-16): cor_mat = torch.matmul(x, x.t()) norm_mat = cor_mat.diag() distances = norm_mat.unsqueeze(1) - 2 * cor_mat + norm_mat.unsqueeze(0) distances = F.relu(distances) if not squared: mask = torch.eq(distances, 0.0).float() distances = distances + mask * eps distances = torch.sqrt(distances) distances = distances * (1.0 - mask) return distances class HardTripletLossNew(nn.Module): """Hard/Hardest Triplet Loss (pytorch implementation of https://omoindrot.github.io/triplet-loss) For each anchor, we get the hardest positive and hardest negative to form a triplet. """ def __init__(self, margin=0.1, hardest=False, squared=False): """ Args: margin: margin for triplet loss hardest: If true, loss is considered only hardest triplets. squared: If true, output is the pairwise squared euclidean distance matrix. If false, output is the pairwise euclidean distance matrix. """ super(HardTripletLossNew, self).__init__() self.margin = margin self.hardest = hardest self.squared = squared def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Shubodh/NetVLAD-pytorch
HardTripletLoss
false
9,493
[ "MIT" ]
0
ea45bac16dbb3e3bec4172df58715bf3526ee502
https://github.com/Shubodh/NetVLAD-pytorch/tree/ea45bac16dbb3e3bec4172df58715bf3526ee502
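On the HardTripletLoss entry above: _pairwise_distance uses the expansion ||x_i - x_j||^2 = ||x_i||^2 - 2 <x_i, x_j> + ||x_j||^2, built from the Gram matrix and its diagonal, and the mask/eps handling only guards the sqrt at zero distances. A brief numeric check of that identity, illustrative and not part of the entry:

import torch

x = torch.randn(4, 4)
gram = x @ x.t()
sq_norms = gram.diag()
# ||x_i||^2 - 2 <x_i, x_j> + ||x_j||^2 equals the squared Euclidean distance
dist_sq = sq_norms.unsqueeze(1) - 2 * gram + sq_norms.unsqueeze(0)
ref = torch.cdist(x, x) ** 2
assert torch.allclose(dist_sq.clamp_min(0), ref, atol=1e-4)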
DepthWiseConvolution
import torch
from torch import nn as nn


class DepthWiseConvolution(nn.Module):

    def __init__(self, channels, kernelSize, stride, expansionFactor):
        super(DepthWiseConvolution, self).__init__()
        channels = channels * expansionFactor
        self.layer = nn.Conv2d(channels, channels, kernelSize, stride, (
            kernelSize - 1) // 2, groups=channels, bias=True)

    def forward(self, x):
        return self.layer(x)


def get_inputs():
    return [torch.rand([4, 16, 64, 64])]


def get_init_inputs():
    return [[], {'channels': 4, 'kernelSize': 4, 'stride': 1,
        'expansionFactor': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 254016 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3969 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=16, bias=None) assert_size_stride(buf0, (4, 16, 63, 63), (63504, 3969, 63, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(254016)](buf1, primals_2, 254016, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class DepthWiseConvolutionNew(nn.Module): def __init__(self, channels, kernelSize, stride, expansionFactor): super(DepthWiseConvolutionNew, self).__init__() channels = channels * expansionFactor self.layer = nn.Conv2d(channels, channels, kernelSize, stride, ( kernelSize - 1) // 2, groups=channels, bias=True) def forward(self, input_0): primals_1 = self.layer.weight primals_2 = self.layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Pranshu-Bahadur/g2net
DepthWiseConvolution
false
9,494
[ "MIT" ]
0
a117df7699837c9a3ae21ec59a310d7384369601
https://github.com/Pranshu-Bahadur/g2net/tree/a117df7699837c9a3ae21ec59a310d7384369601
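On the DepthWiseConvolution entry above: the kernel's xnumel = 254016 follows from the convolution output size. With a 64x64 input, kernel 4, stride 1 and padding (4 - 1) // 2 = 1, the spatial output is (64 + 2 - 4) + 1 = 63, so the bias-add covers 4 * 16 * 63 * 63 = 254016 elements. Quick check, illustrative only:

import torch

conv = torch.nn.Conv2d(16, 16, kernel_size=4, stride=1,
                       padding=(4 - 1) // 2, groups=16)
out = conv(torch.rand(4, 16, 64, 64))
assert out.shape == (4, 16, 63, 63)
assert out.numel() == 254016  # 4 * 16 * 63 * 63, the kernel's xnumel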
UpBlock
import torch import torch.nn as nn from torch.nn import functional as F class UpBlock(nn.Module): """Upsample block for DRRG and TextSnake.""" def __init__(self, in_channels, out_channels): super().__init__() assert isinstance(in_channels, int) assert isinstance(out_channels, int) self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) self.deconv = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=4, stride=2, padding=1) def forward(self, x): x = F.relu(self.conv1x1(x)) x = F.relu(self.conv3x3(x)) x = self.deconv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 8, 8), (256, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_1[grid(1024)](buf5, primals_7, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3 class UpBlockNew(nn.Module): """Upsample block for DRRG and TextSnake.""" def __init__(self, in_channels, out_channels): super().__init__() assert isinstance(in_channels, int) assert isinstance(out_channels, int) self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) self.deconv = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=4, stride=2, padding=1) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = 
self.conv1x1.bias primals_4 = self.conv3x3.weight primals_5 = self.conv3x3.bias primals_3 = self.deconv.weight primals_7 = self.deconv.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
SamDM/mmocr
UpBlock
false
9,495
[ "Apache-2.0" ]
0
4cb69141ff8d28c8b1437bf28242e368a0e6ec4f
https://github.com/SamDM/mmocr/tree/4cb69141ff8d28c8b1437bf28242e368a0e6ec4f
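On the UpBlock entry above: the transposed convolution doubles the 4x4 map, since for ConvTranspose2d with kernel 4, stride 2, padding 1 the output size is (4 - 1) * 2 - 2 * 1 + 4 = 8; hence buf4 has shape (4, 4, 8, 8) and the final bias-add kernel runs over 1024 elements. Quick check, illustrative only:

import torch

deconv = torch.nn.ConvTranspose2d(4, 4, kernel_size=4, stride=2, padding=1)
out = deconv(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 8, 8)
assert out.numel() == 1024  # matches xnumel in triton_poi_fused_convolution_1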
Mlayer
import torch
import torch.nn as nn


class Mlayer(nn.Module):

    def __init__(self, in_channel, out_channel, stride=1):
        super(Mlayer, self).__init__()
        m_s = torch.zeros([1, in_channel, 1, 1], requires_grad=True)
        self.m_s = torch.nn.Parameter(m_s)
        self.register_parameter('m_scale', self.m_s)
        self.func = nn.Identity()
        if in_channel != out_channel:
            self.func = nn.Conv2d(in_channels=in_channel, out_channels=
                out_channel, kernel_size=1, stride=stride, padding=0)

    def forward(self, input):
        x = input * self.m_s
        x = self.func(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class MlayerNew(nn.Module): def __init__(self, in_channel, out_channel, stride=1): super(MlayerNew, self).__init__() m_s = torch.zeros([1, in_channel, 1, 1], requires_grad=True) self.m_s = torch.nn.Parameter(m_s) self.register_parameter('m_scale', self.m_s) self.func = nn.Identity() if in_channel != out_channel: self.func = nn.Conv2d(in_channels=in_channel, out_channels= out_channel, kernel_size=1, stride=stride, padding=0) def forward(self, input_0): primals_1 = self.m_s primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
Sharingsky/resrep
Mlayer
false
9,496
[ "MIT" ]
0
a173d1bc256b75b2c902024929e406863ce48b9b
https://github.com/Sharingsky/resrep/tree/a173d1bc256b75b2c902024929e406863ce48b9b
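On the Mlayer entry above: with in_channel == out_channel, self.func stays nn.Identity, so the whole compiled forward reduces to the single broadcast multiply kernel, a (1, C, 1, 1) parameter scaled against the (N, C, H, W) input. Minimal illustration, reusing the zero initialisation from the entry:

import torch

m_s = torch.zeros(1, 4, 1, 1)  # the zero-initialised m_scale parameter
x = torch.rand(4, 4, 4, 4)
out = x * m_s  # the only op left once nn.Identity is folded away
assert out.shape == x.shape
assert torch.all(out == 0)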
ModulatedConv2d
from torch.autograd import Function import math import random import torch from torch import nn from torch.nn import functional as F def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5): if input.device.type == 'cpu': if bias is not None: rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], * rest_dim), negative_slope=0.2) * scale else: return F.leaky_relu(input, negative_slope=0.2) * scale else: return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
            ctx.pad_y0, ctx.pad_y1)
        gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
            ctx.out_size[0], ctx.out_size[1])
        return gradgrad_out, None, None, None, None, None, None, None, None


class UpFirDn2d(Function):

    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad
        kernel_h, kernel_w = kernel.shape
        _batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape
        input = input.reshape(-1, in_h, in_w, 1)
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = out_h, out_w
        ctx.up = up_x, up_y
        ctx.down = down_x, down_y
        ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
        ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
        out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
            down_y, pad_x0, pad_x1, pad_y0, pad_y1)
        out = out.view(-1, channel, out_h, out_w)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors
        grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
            grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
            ctx.out_size)
        return grad_input, None, None, None, None


class Blur(nn.Module):

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        kernel = make_kernel(kernel)
        if upsample_factor > 1:
            kernel = kernel * upsample_factor ** 2
        self.register_buffer('kernel', kernel)
        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)
        return out


class FusedLeakyReLUFunctionBackward(Function):

    @staticmethod
    def forward(ctx, grad_output, out, bias, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        empty = grad_output.new_empty(0)
        grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
            negative_slope, scale)
        dim = [0]
        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))
        if bias:
            grad_bias = grad_input.sum(dim).detach()
        else:
            grad_bias = None
        return grad_input, grad_bias

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        out, = ctx.saved_tensors
        gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
            out, 3, 1, ctx.negative_slope, ctx.scale)
        return gradgrad_out, None, None, None, None


class FusedLeakyReLUFunction(Function):

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        empty = input.new_empty(0)
        # record whether a real bias was supplied before substituting the
        # empty placeholder, so backward only emits a bias gradient when
        # one was actually passed in
        ctx.bias = bias is not None
        if bias is None:
            bias = empty
        out = fused.fused_bias_act(input, bias, empty, 3, 0,
            negative_slope, scale)
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out

    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors
        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale)
        return grad_input, grad_bias, None, None


class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
        activation=None):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        self.scale = 1 / math.sqrt(in_dim) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)
        else:
            out = F.linear(input, self.weight * self.scale, bias=self.bias *
                self.lr_mul)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
            )


class ModulatedConv2d(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, style_dim,
        modulation_type='style', factorization_rank=5, num_kernels=1,
        use_sigmoid=False, demodulate=True, upsample=False,
        downsample=False, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        assert modulation_type in ['style', 'factorized']
        assert num_kernels > 0
        self.eps = 1e-08
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample
        self.factorization_rank = factorization_rank
        self.use_sigmoid = use_sigmoid
        self.modulation_type = modulation_type
        self.num_kernels = num_kernels
        if upsample:
            factor = 2
            p = len(blur_kernel) - factor - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1),
                upsample_factor=factor)
        if downsample:
            factor = 2
            p = len(blur_kernel) - factor + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.padding = kernel_size // 2
        self.weight = nn.Parameter(torch.randn(num_kernels, out_channel,
            in_channel, kernel_size, kernel_size))
        if num_kernels > 1:
            self.kernel_attention = EqualLinear(style_dim, num_kernels,
                bias_init=0, lr_mul=1.0)
        if modulation_type == 'style':
            self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
        if modulation_type == 'factorized':
            if use_sigmoid:
                self.modulation = EqualLinear(style_dim, (in_channel +
                    out_channel) * self.factorization_rank, bias_init=0)
            else:
                self.modulation = EqualLinear(style_dim, (in_channel +
                    out_channel) * self.factorization_rank, bias_init=1)
        self.demodulate = demodulate

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
            )

    def forward(self, input, style, epsilon_greedy=0.1, softmax=None):
        batch, in_channel, height, width = input.shape
        weight = self.weight
        if self.num_kernels > 1:
            if softmax is None:
                logits = self.kernel_attention(style) * 1.0
                softmax = nn.functional.softmax(logits, dim=1)
                if random.random() < epsilon_greedy:
                    # draw the exploration noise on the logits' device and
                    # dtype so the mixing below works on GPU as well
                    logit_noise = torch.randn_like(logits)
                    softmax_noise = nn.functional.softmax(logit_noise, dim=1)
                    alpha = random.random() / 2.0
                    softmax = softmax * (1.0 - alpha) + softmax_noise * alpha
            assert softmax.ndim == 2
            weight = torch.unsqueeze(weight, dim=0) * softmax.view(batch,
                self.num_kernels, 1, 1, 1, 1)
            weight = weight.sum(dim=1)
        if self.modulation_type == 'style':
            style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
            weight = self.scale * weight * style
        if self.modulation_type == 'factorized':
            ab = self.modulation(style)
            a, b = ab[:, :self.out_channel * self.factorization_rank], ab[:,
                self.out_channel * self.factorization_rank:]
            a, b = a.view(batch, self.out_channel, self.factorization_rank
                ), b.view(batch, self.factorization_rank, in_channel)
            m = torch.bmm(a, b).view(batch, self.out_channel, in_channel, 1, 1)
            if self.use_sigmoid:
                m = torch.sigmoid(m)
            weight = self.scale * weight * m
        if self.demodulate:
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
        weight = weight.view(batch * self.out_channel, in_channel,
            self.kernel_size, self.kernel_size)
        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(batch, self.out_channel, in_channel,
                self.kernel_size, self.kernel_size)
            weight = weight.transpose(1, 2).reshape(batch * in_channel,
                self.out_channel, self.kernel_size, self.kernel_size)
            out = F.conv_transpose2d(input, weight, padding=0, stride=2,
                groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)
        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        else:
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4,
        'style_dim': 4}]
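# A minimal numeric sketch of the demodulation identity used in the forward
# above: after the per-sample style modulation and the rsqrt demodulation,
# every (batch, out_channel) filter slice has approximately unit L2 norm.
# The shapes below are illustrative assumptions, not taken from this row.
import torch


def _demod_unit_norm_sketch(batch=2, out_c=4, in_c=3, k=3):
    weight = torch.randn(1, out_c, in_c, k, k)
    style = torch.randn(batch, 1, in_c, 1, 1)
    w = weight * style                                    # modulate
    demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-08)
    w = w * demod.view(batch, out_c, 1, 1, 1)             # demodulate
    norms = w.pow(2).sum([2, 3, 4]).sqrt()                # ~1 everywhere
    assert torch.allclose(norms, torch.ones_like(norms), atol=1e-4)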
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_4, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_5, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_2, buf2, buf5, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 
1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_2, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0 ], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5): if input.device.type == 'cpu': if bias is not None: rest_dim = [1] * (input.ndim - bias.ndim - 1) return F.leaky_relu(input + bias.view(1, bias.shape[0], * rest_dim), negative_slope=0.2) * scale else: return F.leaky_relu(input, negative_slope=0.2) * scale else: return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. 
in_size[3], 1)
        gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel,
            ctx.up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0,
            ctx.pad_x1, ctx.pad_y0, ctx.pad_y1)
        gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
            ctx.out_size[0], ctx.out_size[1])
        return gradgrad_out, None, None, None, None, None, None, None, None


class UpFirDn2d(Function):

    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad
        kernel_h, kernel_w = kernel.shape
        _batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape
        input = input.reshape(-1, in_h, in_w, 1)
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = out_h, out_w
        ctx.up = up_x, up_y
        ctx.down = down_x, down_y
        ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
        ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
        out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
            down_y, pad_x0, pad_x1, pad_y0, pad_y1)
        out = out.view(-1, channel, out_h, out_w)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors
        grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
            grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
            ctx.out_size)
        return grad_input, None, None, None, None


class Blur(nn.Module):

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        kernel = make_kernel(kernel)
        if upsample_factor > 1:
            kernel = kernel * upsample_factor ** 2
        self.register_buffer('kernel', kernel)
        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)
        return out


class FusedLeakyReLUFunctionBackward(Function):

    @staticmethod
    def forward(ctx, grad_output, out, bias, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        empty = grad_output.new_empty(0)
        grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
            negative_slope, scale)
        dim = [0]
        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))
        if bias:
            grad_bias = grad_input.sum(dim).detach()
        else:
            grad_bias = None
        return grad_input, grad_bias

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        out, = ctx.saved_tensors
        gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
            out, 3, 1, ctx.negative_slope, ctx.scale)
        return gradgrad_out, None, None, None, None


class FusedLeakyReLUFunction(Function):

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        empty = input.new_empty(0)
        # record whether a real bias was supplied before substituting the
        # empty placeholder, so backward only emits a bias gradient when
        # one was actually passed in
        ctx.bias = bias is not None
        if bias is None:
            bias = empty
        out = fused.fused_bias_act(input, bias, empty, 3, 0,
            negative_slope, scale)
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out

    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors
        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale)
        return grad_input, grad_bias, None, None


class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
        activation=None):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, modulation_type='style', factorization_rank=5, num_kernels=1, use_sigmoid=False, demodulate=True, upsample=False, downsample= False, blur_kernel=[1, 3, 3, 1]): super().__init__() assert modulation_type in ['style', 'factorized'] assert num_kernels > 0 self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample self.factorization_rank = factorization_rank self.use_sigmoid = use_sigmoid self.modulation_type = modulation_type self.num_kernels = num_kernels if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(num_kernels, out_channel, in_channel, kernel_size, kernel_size)) if num_kernels > 1: self.kernel_attention = EqualLinear(style_dim, num_kernels, bias_init=0, lr_mul=1.0) if modulation_type == 'style': self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) if modulation_type == 'factorized': if use_sigmoid: self.modulation = EqualLinear(style_dim, (in_channel + out_channel) * self.factorization_rank, bias_init=0) else: self.modulation = EqualLinear(style_dim, (in_channel + out_channel) * self.factorization_rank, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_2 = self.weight primals_3 = self.modulation.weight primals_4 = self.modulation.bias primals_1 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
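# A hedged parity sketch for the pair above: with the defaults used in
# get_init_inputs (num_kernels=1, style modulation) the forward pass has no
# randomness, so the eager ModulatedConv2d and the compiled
# ModulatedConv2dNew should agree closely. Assumes both classes are
# importable in one session and a CUDA device is available; the tolerance
# is illustrative.
def _modulated_conv_parity_sketch():
    import torch
    if not torch.cuda.is_available():
        return
    ref = ModulatedConv2d(4, 4, 4, 4).cuda()
    new = ModulatedConv2dNew(4, 4, 4, 4).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    s = torch.rand(4, 4, device='cuda')
    assert torch.allclose(ref(x, s), new(x, s), atol=1e-4)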
SavvaI/stylegan2-pytorch
ModulatedConv2d
false
9497
[ "MIT", "BSD-2-Clause", "Apache-2.0" ]
0
b8e4b605bd951283ef2c9a784e7afa0a486975bb
https://github.com/SavvaI/stylegan2-pytorch/tree/b8e4b605bd951283ef2c9a784e7afa0a486975bb
ScaledL2Norm
import torch import torch.onnx import torch import torch.nn as nn import torch.nn.functional as F class ScaledL2Norm(nn.Module): def __init__(self, in_channels, initial_scale): super(ScaledL2Norm, self).__init__() self.in_channels = in_channels self.scale = nn.Parameter(torch.Tensor(in_channels)) self.initial_scale = initial_scale self.reset_parameters() def forward(self, x): return F.normalize(x, p=2, dim=1) * self.scale.unsqueeze(0).unsqueeze(2 ).unsqueeze(3) def reset_parameters(self): self.scale.data.fill_(self.initial_scale) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'initial_scale': 1.0}]
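# The Triton kernel in the compiled variant below fuses exactly this
# computation: divide by the per-position channel norm clamped at 1e-12
# (F.normalize's default eps), then scale per channel. A small CPU check of
# that equivalence, with illustrative shapes:
import torch
import torch.nn.functional as F


def _scaled_l2norm_manual_sketch():
    x, scale = torch.rand(4, 4, 4, 4), torch.ones(4)
    norm = x.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-12)
    manual = x / norm * scale.view(1, -1, 1, 1)
    ref = F.normalize(x, p=2, dim=1) * scale.view(1, -1, 1, 1)
    assert torch.allclose(manual, ref)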
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.onnx import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp17 = tmp15 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class ScaledL2NormNew(nn.Module): def __init__(self, in_channels, initial_scale): super(ScaledL2NormNew, self).__init__() self.in_channels = in_channels self.scale = nn.Parameter(torch.Tensor(in_channels)) self.initial_scale = initial_scale self.reset_parameters() def reset_parameters(self): self.scale.data.fill_(self.initial_scale) def forward(self, input_0): primals_2 = self.scale primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
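# Hedged parity sketch for the pair above; both modules initialise the scale
# parameter to the same constant, so no state copy is needed. Assumes both
# classes are importable together and a CUDA device is available.
def _scaled_l2norm_parity_sketch():
    import torch
    if not torch.cuda.is_available():
        return
    ref = ScaledL2Norm(4, 1.0).cuda()
    new = ScaledL2NormNew(4, 1.0).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), new(x), atol=1e-6)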
SoonminHwang/pytorch-ssd
ScaledL2Norm
false
9498
[ "MIT" ]
0
1d6b9427a4b649bc2ce85a82511b9dd299f9d3e8
https://github.com/SoonminHwang/pytorch-ssd/tree/1d6b9427a4b649bc2ce85a82511b9dd299f9d3e8
RobustScannerFusionLayer
import torch import torch.nn as nn class RobustScannerFusionLayer(nn.Module): def __init__(self, dim_model, dim=-1): super().__init__() self.dim_model = dim_model self.dim = dim self.linear_layer = nn.Linear(dim_model * 2, dim_model * 2) self.glu_layer = nn.GLU(dim=dim) def forward(self, x0, x1): assert x0.size() == x1.size() fusion_input = torch.cat([x0, x1], self.dim) output = self.linear_layer(fusion_input) output = self.glu_layer(output) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_model': 4}]
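# nn.GLU(dim=-1) halves the last dimension and gates the first half with the
# sigmoid of the second; this is the identity the fused Triton kernel in the
# compiled variant below relies on after the addmm. Small CPU check:
import torch


def _glu_identity_sketch():
    y = torch.rand(2, 8)
    a, b = y.chunk(2, dim=-1)
    assert torch.allclose(torch.nn.functional.glu(y, dim=-1),
                          a * torch.sigmoid(b))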
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (8, 8), (8, 1)) assert_size_stride(primals_4, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_3, (8, 8), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_glu_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(buf0, (64, 8), (8, 1), 0 ), reinterpret_tensor(buf1, (4, 4, 4, 8), (128, 32, 8, 1), 0) class RobustScannerFusionLayerNew(nn.Module): def __init__(self, dim_model, dim=-1): super().__init__() self.dim_model = dim_model self.dim = dim self.linear_layer = nn.Linear(dim_model * 2, dim_model * 2) self.glu_layer = nn.GLU(dim=dim) def forward(self, input_0, input_1): primals_3 = self.linear_layer.weight primals_4 = self.linear_layer.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
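# Hedged parity sketch for the pair above, assuming both classes share a
# session and a CUDA device is available; the tolerance is illustrative.
def _fusion_layer_parity_sketch():
    import torch
    if not torch.cuda.is_available():
        return
    ref = RobustScannerFusionLayer(4).cuda()
    new = RobustScannerFusionLayerNew(4).cuda()
    new.load_state_dict(ref.state_dict())
    x0 = torch.rand(4, 4, 4, 4, device='cuda')
    x1 = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x0, x1), new(x0, x1), atol=1e-6)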
SamDM/mmocr
RobustScannerFusionLayer
false
9499
[ "Apache-2.0" ]
0
4cb69141ff8d28c8b1437bf28242e368a0e6ec4f
https://github.com/SamDM/mmocr/tree/4cb69141ff8d28c8b1437bf28242e368a0e6ec4f
MeanReweightLayer
import torch import torch.nn as nn import torch.nn.parallel from torch.nn.parameter import Parameter class MeanReweightLayer(nn.Module): """Renamed to Attention-Bias (AB) layer in paper""" def __init__(self, channel): super(MeanReweightLayer, self).__init__() self.cfc = Parameter(torch.Tensor(channel)) self.cfc.data.fill_(0) def forward(self, x): avg_y = torch.mean(x, dim=(2, 3), keepdim=True) avg_y = avg_y * self.cfc[None, :, None, None] return x + avg_y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
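# Because cfc is zero-initialised, the layer above starts out as an exact
# identity map; the attention bias only appears once cfc moves away from 0.
# Small CPU check:
import torch


def _mean_reweight_identity_sketch():
    layer = MeanReweightLayer(4)
    x = torch.rand(4, 4, 4, 4)
    assert torch.equal(layer(x), x)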
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tmp8 = tmp6 * tmp7 tmp9 = tmp0 + tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mean_mul_0[grid(16)](buf1, primals_1, primals_2, buf2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 del primals_2 return buf2, buf1 class MeanReweightLayerNew(nn.Module): """Renamed to Attention-Bias (AB) layer in paper""" def __init__(self, channel): super(MeanReweightLayerNew, self).__init__() self.cfc = Parameter(torch.Tensor(channel)) self.cfc.data.fill_(0) def forward(self, input_0): primals_2 = self.cfc primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
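# Hedged parity sketch for the pair above; cfc is set to a non-zero value so
# the mean/scale/add path is actually exercised. Assumes both classes share
# a session and a CUDA device is available.
def _mean_reweight_parity_sketch():
    import torch
    if not torch.cuda.is_available():
        return
    ref = MeanReweightLayer(4).cuda()
    new = MeanReweightLayerNew(4).cuda()
    with torch.no_grad():
        ref.cfc.fill_(0.5)
        new.cfc.fill_(0.5)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), new(x), atol=1e-6)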
SanderKlomp/channel-attention
MeanReweightLayer
false
9500
[ "MIT" ]
0
9dfdb28f3ad4de13b4c076d1423f21c05c907bd7
https://github.com/SanderKlomp/channel-attention/tree/9dfdb28f3ad4de13b4c076d1423f21c05c907bd7
Upsampler
import math import torch from torchvision.transforms import * class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class Upsampler(torch.nn.Module): def __init__(self, scale, n_feat, bn=False, act='prelu', bias=True): super(Upsampler, self).__init__() modules = [] for _ in range(int(math.log(scale, 2))): modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None)) modules.append(torch.nn.PixelShuffle(2)) if bn: modules.append(torch.nn.BatchNorm2d(n_feat)) self.up = torch.nn.Sequential(*modules) self.activation = act if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): out = self.up(x) if self.activation is not None: out = self.act(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'scale': 1.0, 'n_feat': 4}]
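# int(math.log(scale, 2)) sets how many (Conv -> PixelShuffle(2)) stages run:
# scale=1 builds an empty stack (activation only, as in get_init_inputs
# above), while e.g. scale=4 gives two 2x stages. Illustrative shape check,
# with assumed sizes:
import torch


def _upsampler_shape_sketch():
    up = Upsampler(4, 8)
    y = up(torch.rand(1, 8, 16, 16))
    assert y.shape == (1, 8, 64, 64)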
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class UpsamplerNew(torch.nn.Module): def __init__(self, scale, n_feat, bn=False, act='prelu', bias=True): super(UpsamplerNew, self).__init__() modules = [] for _ in range(int(math.log(scale, 2))): modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None)) modules.append(torch.nn.PixelShuffle(2)) if bn: modules.append(torch.nn.BatchNorm2d(n_feat)) self.up = torch.nn.Sequential(*modules) self.activation = act if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, input_0): primals_2 = self.act.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
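# With scale=1.0 the up-stack above is empty, so the compiled graph reduces
# to the single PReLU kernel. Hedged parity sketch, assuming CUDA and that
# both classes share a session; both PReLUs start from the same default
# weight (0.25), so no state copy is needed.
def _upsampler_parity_sketch():
    import torch
    if not torch.cuda.is_available():
        return
    ref = Upsampler(1.0, 4).cuda()
    new = UpsamplerNew(1.0, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), new(x))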
EvgeneyZ/RBPN
Upsampler
false
9501
[ "MIT" ]
0
acfe636cc48a4fbfea78f934a251c32e53367659
https://github.com/EvgeneyZ/RBPN/tree/acfe636cc48a4fbfea78f934a251c32e53367659
GAT
import torch import torch.nn as nn import torch.nn.functional as F from scipy.sparse import * def dropout(x, drop_prob, shared_axes=[], training=False): """ Apply dropout to input tensor. Parameters ---------- input_tensor: ``torch.FloatTensor`` A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` Returns ------- output: ``torch.FloatTensor`` A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied. """ if drop_prob == 0 or drop_prob is None or not training: return x sz = list(x.size()) for i in shared_axes: sz[i] = 1 mask = x.new(*sz).bernoulli_(1.0 - drop_prob).div_(1.0 - drop_prob) mask = mask.expand_as(x) return x * mask class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha=0.2, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a1 = nn.Parameter(torch.zeros(size=(out_features, 1))) nn.init.xavier_uniform_(self.a1.data, gain=1.414) self.a2 = nn.Parameter(torch.zeros(size=(out_features, 1))) nn.init.xavier_uniform_(self.a2.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) h.size()[0] a_input1 = torch.matmul(h, self.a1) a_input2 = torch.matmul(h, self.a2) e = self.leakyrelu(a_input1 + a_input2.transpose(-1, -2)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads): """Dense version of GAT.""" super(GAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout= dropout, alpha=alpha, concat=False) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout, training=self.training) x = F.elu(self.out_att(x, adj)) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
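# The a1/a2 split in GraphAttentionLayer above is the usual factorisation of
# the GAT score a^T [Wh_i || Wh_j]: writing a = [a1; a2], the pre-activation
# scores satisfy e = h a1 + (h a2)^T, which avoids materialising all N^2
# concatenated pairs. Small CPU check of that identity, with assumed sizes:
import torch


def _gat_score_split_sketch(n=5, d=4):
    h = torch.rand(n, d)
    a1, a2 = torch.rand(d, 1), torch.rand(d, 1)
    a = torch.cat([a1, a2], dim=0)                                 # (2d, 1)
    pairs = torch.cat([h.repeat_interleave(n, 0), h.repeat(n, 1)], dim=1)
    e_concat = (pairs @ a).view(n, n)
    e_split = h @ a1 + (h @ a2).t()
    assert torch.allclose(e_concat, e_split, atol=1e-6)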
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = tl.load(in_ptr3 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr3 + 1) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp22 = tl.load(in_ptr3 + 2) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp31 = tl.load(in_ptr3 + 3) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp38 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp39 = tl.load(in_ptr5 + x0, xmask) tmp40 = tl.load(in_ptr6 + 0) tmp41 = tl.broadcast_to(tmp40, [XBLOCK]) tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp47 = tl.load(in_ptr6 + 1) tmp48 = tl.broadcast_to(tmp47, [XBLOCK]) tmp54 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp55 = tl.load(in_ptr6 + 2) tmp56 = tl.broadcast_to(tmp55, [XBLOCK]) tmp62 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp63 = tl.load(in_ptr6 + 3) tmp64 = tl.broadcast_to(tmp63, [XBLOCK]) tmp70 = tl.load(in_ptr7 + 4 * 
x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp71 = tl.load(in_ptr8 + x0, xmask) tmp72 = tl.load(in_ptr9 + 0) tmp73 = tl.broadcast_to(tmp72, [XBLOCK]) tmp78 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp79 = tl.load(in_ptr9 + 1) tmp80 = tl.broadcast_to(tmp79, [XBLOCK]) tmp86 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp87 = tl.load(in_ptr9 + 2) tmp88 = tl.broadcast_to(tmp87, [XBLOCK]) tmp94 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp95 = tl.load(in_ptr9 + 3) tmp96 = tl.broadcast_to(tmp95, [XBLOCK]) tmp102 = tl.load(in_ptr10 + 4 * x0, xmask, eviction_policy='evict_last' ).to(tl.int1) tmp103 = tl.load(in_ptr11 + x0, xmask) tmp104 = tl.load(in_ptr12 + 0) tmp105 = tl.broadcast_to(tmp104, [XBLOCK]) tmp110 = tl.load(in_ptr10 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp111 = tl.load(in_ptr12 + 1) tmp112 = tl.broadcast_to(tmp111, [XBLOCK]) tmp118 = tl.load(in_ptr10 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp119 = tl.load(in_ptr12 + 2) tmp120 = tl.broadcast_to(tmp119, [XBLOCK]) tmp126 = tl.load(in_ptr10 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp127 = tl.load(in_ptr12 + 3) tmp128 = tl.broadcast_to(tmp127, [XBLOCK]) tmp5 = tmp2 + tmp4 tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp8 = tl.where(tmp1, tmp5, tmp7) tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp0, tmp8, tmp9) tmp15 = tmp2 + tmp14 tmp16 = tmp15 * tmp6 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = tl.where(tmp11, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp24 = tmp2 + tmp23 tmp25 = tmp24 * tmp6 tmp26 = tl.where(tmp21, tmp24, tmp25) tmp27 = tl.where(tmp20, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp33 = tmp2 + tmp32 tmp34 = tmp33 * tmp6 tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tmp42 = tmp39 + tmp41 tmp43 = tmp42 * tmp6 tmp44 = tl.where(tmp38, tmp42, tmp43) tmp45 = tl.where(tmp0, tmp44, tmp9) tmp49 = tmp39 + tmp48 tmp50 = tmp49 * tmp6 tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tl.where(tmp11, tmp51, tmp9) tmp53 = triton_helpers.maximum(tmp45, tmp52) tmp57 = tmp39 + tmp56 tmp58 = tmp57 * tmp6 tmp59 = tl.where(tmp54, tmp57, tmp58) tmp60 = tl.where(tmp20, tmp59, tmp9) tmp61 = triton_helpers.maximum(tmp53, tmp60) tmp65 = tmp39 + tmp64 tmp66 = tmp65 * tmp6 tmp67 = tl.where(tmp62, tmp65, tmp66) tmp68 = tl.where(tmp29, tmp67, tmp9) tmp69 = triton_helpers.maximum(tmp61, tmp68) tmp74 = tmp71 + tmp73 tmp75 = tmp74 * tmp6 tmp76 = tl.where(tmp70, tmp74, tmp75) tmp77 = tl.where(tmp0, tmp76, tmp9) tmp81 = tmp71 + tmp80 tmp82 = tmp81 * tmp6 tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tl.where(tmp11, tmp83, tmp9) tmp85 = triton_helpers.maximum(tmp77, tmp84) tmp89 = tmp71 + tmp88 tmp90 = tmp89 * tmp6 tmp91 = tl.where(tmp86, tmp89, tmp90) tmp92 = tl.where(tmp20, tmp91, tmp9) tmp93 = triton_helpers.maximum(tmp85, tmp92) tmp97 = tmp71 + tmp96 tmp98 = tmp97 * tmp6 tmp99 = tl.where(tmp94, tmp97, tmp98) tmp100 = tl.where(tmp29, tmp99, tmp9) tmp101 = triton_helpers.maximum(tmp93, tmp100) tmp106 = tmp103 + tmp105 tmp107 = tmp106 * tmp6 tmp108 = tl.where(tmp102, tmp106, tmp107) tmp109 = tl.where(tmp0, tmp108, tmp9) tmp113 = tmp103 + tmp112 tmp114 = tmp113 * tmp6 tmp115 = tl.where(tmp110, tmp113, tmp114) tmp116 = tl.where(tmp11, tmp115, tmp9) tmp117 = triton_helpers.maximum(tmp109, tmp116) tmp121 = tmp103 + tmp120 tmp122 = tmp121 * tmp6 tmp123 = 
tl.where(tmp118, tmp121, tmp122) tmp124 = tl.where(tmp20, tmp123, tmp9) tmp125 = triton_helpers.maximum(tmp117, tmp124) tmp129 = tmp103 + tmp128 tmp130 = tmp129 * tmp6 tmp131 = tl.where(tmp126, tmp129, tmp130) tmp132 = tl.where(tmp29, tmp131, tmp9) tmp133 = triton_helpers.maximum(tmp125, tmp132) tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp69, xmask) tl.store(out_ptr2 + x0, tmp101, xmask) tl.store(out_ptr3 + x0, tmp133, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x2, xmask).to(tl.int1) tmp14 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr9 + x2, xmask).to(tl.int1) tmp24 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr11 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr13 + x2, xmask).to(tl.int1) tmp34 = tl.load(in_ptr14 + x1, xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr15 + x0, xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr16 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tmp16 * tmp5 tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tl.where(tmp0, tmp18, tmp8) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp26 = tmp24 + tmp25 tmp27 = tmp26 * tmp5 tmp28 = tl.where(tmp23, tmp26, tmp27) tmp29 = tl.where(tmp0, tmp28, tmp8) tmp31 = tmp29 - tmp30 tmp32 = tl_math.exp(tmp31) tmp36 = tmp34 + tmp35 tmp37 = tmp36 * tmp5 tmp38 = tl.where(tmp33, tmp36, tmp37) tmp39 = tl.where(tmp0, tmp38, tmp8) tmp41 = tmp39 - tmp40 tmp42 = tl_math.exp(tmp41) tl.store(out_ptr0 + x2, tmp12, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) tl.store(out_ptr2 + x2, tmp32, xmask) tl.store(out_ptr3 + x2, tmp42, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, 
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tl.full([1], 16, tl.int64) tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + x2, tmp52, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = tl.load(in_ptr3 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr3 + 1) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp22 = tl.load(in_ptr3 + 2) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp31 = tl.load(in_ptr3 + 3) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp5 = tmp2 + tmp4 tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp8 = tl.where(tmp1, tmp5, tmp7) tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp0, tmp8, tmp9) tmp15 = tmp2 + tmp14 tmp16 = tmp15 * tmp6 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = tl.where(tmp11, tmp17, tmp9) 
tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp24 = tmp2 + tmp23 tmp25 = tmp24 * tmp6 tmp26 = tl.where(tmp21, tmp24, tmp25) tmp27 = tl.where(tmp20, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp33 = tmp2 + tmp32 tmp34 = tmp33 * tmp6 tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tl.store(out_ptr0 + x0, tmp37, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_elu_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 1), (1, 1)) assert_size_stride(primals_8, (4, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, 1), (1, 1)) assert_size_stride(primals_11, (4, 1), (1, 1)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4, 1), (1, 1)) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (4, 1), (1, 1)) assert_size_stride(primals_17, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_3, out=buf1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_4, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_gt_1[grid(16)](primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_6, out=buf9) del primals_6 buf10 = 
empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_7, out=buf10) buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_8, out=buf11) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf10, buf11, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_9, out=buf17) del primals_9 buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, primals_10, out=buf18) buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, primals_11, out=buf19) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf18, buf19, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_12, out=buf25) del primals_12 buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, primals_13, out=buf26) buf27 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, primals_14, out=buf27) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf26, buf27, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_2[grid(4)](buf4, buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19, buf28, buf26, buf27, buf5, buf13, buf21, buf29, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_3[grid(16)](buf4, buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20, buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14, buf22, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del buf10 del buf11 del buf13 del buf18 del buf19 del buf2 del buf21 del buf26 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf6 del buf6 extern_kernels.mm(buf7, buf0, out=buf8) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = buf14 del buf14 extern_kernels.mm(buf15, buf9, out=buf16) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf22, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) buf24 = buf22 del buf22 extern_kernels.mm(buf23, buf17, out=buf24) buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf30, buf31, 16, XBLOCK=16, num_warps=1, num_stages=1) buf32 = buf30 del buf30 extern_kernels.mm(buf31, buf25, out=buf32) buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_cat_5[grid(64)](buf8, buf16, buf24, buf32, buf33, 64, XBLOCK=64, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf33, primals_15, out=buf34) buf35 = reinterpret_tensor(buf5, (4, 1), 
(1, 1), 0) del buf5 extern_kernels.mm(buf34, primals_16, out=buf35) buf36 = reinterpret_tensor(buf29, (4, 1), (1, 1), 0) del buf29 extern_kernels.mm(buf34, primals_17, out=buf36) buf37 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf35, buf36, buf37, 16, XBLOCK=16, num_warps=1, num_stages=1) buf38 = reinterpret_tensor(buf27, (4, 1), (1, 4), 0) del buf27 triton_poi_fused__softmax_add_leaky_relu_mul_where_6[grid(4)](buf4, buf37, buf35, buf36, buf38, 4, XBLOCK=4, num_warps=1, num_stages=1) buf39 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_7[grid(16)](buf4, buf37, buf35, buf36, buf38, buf39, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf35 del buf36 del buf38 buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf39, buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = buf39 del buf39 extern_kernels.mm(buf40, buf34, out=buf41) buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_elu_8[grid(16)](buf41, buf42, 16, XBLOCK=16, num_warps=1, num_stages=1) return (buf42, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20, buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41, reinterpret_tensor(buf34, (4, 4), (1, 4), 0), reinterpret_tensor( primals_17, (1, 4), (1, 1), 0), reinterpret_tensor(primals_16, (1, 4), (1, 1), 0), reinterpret_tensor(buf33, (16, 4), (1, 16), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor( primals_14, (1, 4), (1, 1), 0), reinterpret_tensor(primals_13, (1, 4), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor( primals_11, (1, 4), (1, 1), 0), reinterpret_tensor(primals_10, (1, 4), (1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_8, (1, 4), (1, 1), 0), reinterpret_tensor(primals_7, (1, 4), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor( primals_4, (1, 4), (1, 1), 0), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0)) def dropout(x, drop_prob, shared_axes=[], training=False): """ Apply dropout to input tensor. Parameters ---------- input_tensor: ``torch.FloatTensor`` A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` Returns ------- output: ``torch.FloatTensor`` A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied. 
""" if drop_prob == 0 or drop_prob is None or not training: return x sz = list(x.size()) for i in shared_axes: sz[i] = 1 mask = x.new(*sz).bernoulli_(1.0 - drop_prob).div_(1.0 - drop_prob) mask = mask.expand_as(x) return x * mask class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha=0.2, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a1 = nn.Parameter(torch.zeros(size=(out_features, 1))) nn.init.xavier_uniform_(self.a1.data, gain=1.414) self.a2 = nn.Parameter(torch.zeros(size=(out_features, 1))) nn.init.xavier_uniform_(self.a2.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) h.size()[0] a_input1 = torch.matmul(h, self.a1) a_input2 = torch.matmul(h, self.a2) e = self.leakyrelu(a_input1 + a_input2.transpose(-1, -2)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads): """Dense version of GAT.""" super(GATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout= dropout, alpha=alpha, concat=False) def forward(self, input_0, input_1): primals_1 = self.attention_0.W primals_3 = self.attention_0.a1 primals_4 = self.attention_0.a2 primals_2 = self.attention_1.W primals_7 = self.attention_1.a1 primals_8 = self.attention_1.a2 primals_5 = self.attention_2.W primals_10 = self.attention_2.a1 primals_11 = self.attention_2.a2 primals_6 = self.attention_3.W primals_13 = self.attention_3.a1 primals_14 = self.attention_3.a2 primals_15 = self.out_att.W primals_16 = self.out_att.a1 primals_17 = self.out_att.a2 primals_9 = input_0 primals_12 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
Ononoki-Yotsugi/IDGL
GAT
false
9,502
[ "Apache-2.0" ]
0
a99f840681a4ae26c2740ed9e9302d4e15a68c7f
https://github.com/Ononoki-Yotsugi/IDGL/tree/a99f840681a4ae26c2740ed9e9302d4e15a68c7f
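A minimal usage sketch for the GAT entry above (editor's illustration, not part of the dataset row): it exercises the plain GraphAttentionLayer class shown in the listing with a 4-node toy graph matching the (4, 4) tensors in the compiled call(); the constructor values (in/out features 4, dropout 0.5, alpha 0.2) are illustrative assumptions drawn from those shapes, not values recorded in the row. The compiled GATNew variant would additionally need a CUDA device.

import torch
import torch.nn.functional as F  # GraphAttentionLayer relies on F.softmax, F.dropout and F.elu

features = torch.rand(4, 4)                  # 4 nodes, 4 input features each
adj = (torch.rand(4, 4) > 0.5).float()       # dense 0/1 adjacency matrix

layer = GraphAttentionLayer(in_features=4, out_features=4, dropout=0.5, alpha=0.2, concat=True)
out = layer(features, adj)                   # (4, 4) attended node embeddings
print(out.shape)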
MLPAttention
import torch import torch.nn.functional as F from torch import nn from typing import Optional import torch.optim def get_activation_fn(name: 'Optional[str]'): """Returns a callable activation function from `torch`.""" if name in (None, 'linear'): return lambda x: x elif name in ('sigmoid', 'tanh'): return getattr(torch, name) else: return getattr(F, name) class DotAttention(nn.Module): """Attention layer with dot product.""" def __init__(self, ctx_dim, hid_dim, att_bottleneck='ctx', transform_ctx=True, att_activ='tanh', temp=1.0, ctx2hid=True, mlp_bias=None): super().__init__() self.ctx_dim = ctx_dim self.hid_dim = hid_dim self._ctx2hid = ctx2hid self.temperature = temp self.activ = get_activation_fn(att_activ) if isinstance(att_bottleneck, int): self.mid_dim = att_bottleneck else: self.mid_dim = getattr(self, '{}_dim'.format(att_bottleneck)) self.hid2ctx = nn.Linear(self.hid_dim, self.mid_dim, bias=False) if transform_ctx or self.mid_dim != self.ctx_dim: self.ctx2ctx = nn.Linear(self.ctx_dim, self.mid_dim, bias=False) else: self.ctx2ctx = lambda x: x if self._ctx2hid: self.ctx2hid = nn.Linear(self.ctx_dim, self.hid_dim, bias=False) else: self.ctx2hid = lambda x: x def forward(self, hid, ctx, ctx_mask=None): """Computes attention probabilities and final context using decoder's hidden state and source annotations. Arguments: hid(Tensor): A set of decoder hidden states of shape `T*B*H` where `T` == 1, `B` is batch dim and `H` is hidden state dim. ctx(Tensor): A set of annotations of shape `S*B*C` where `S` is the source timestep dim, `B` is batch dim and `C` is annotation dim. ctx_mask(FloatTensor): A binary mask of shape `S*B` with zeroes in the padded positions. Returns: scores(Tensor): A tensor of shape `S*B` containing normalized attention scores for each position and sample. z_t(Tensor): A tensor of shape `B*H` containing the final attended context vector for this target decoding timestep. Notes: This will only work when `T==1` for now. """ ctx_ = self.ctx2ctx(ctx) hid_ = self.hid2ctx(hid) scores = torch.bmm(hid_.permute(1, 0, 2), ctx_.permute(1, 2, 0)).div( self.temperature).squeeze(1).t() if ctx_mask is not None: scores.masked_fill_((1 - ctx_mask).bool(), -100000000.0) alpha = F.softmax(scores, dim=0) return alpha, self.ctx2hid((alpha.unsqueeze(-1) * ctx).sum(0)) class MLPAttention(DotAttention): """Attention layer with feed-forward layer.""" def __init__(self, ctx_dim, hid_dim, att_bottleneck='ctx', transform_ctx=True, att_activ='tanh', mlp_bias=False, temp=1.0, ctx2hid=True): super().__init__(ctx_dim, hid_dim, att_bottleneck, transform_ctx, att_activ, temp, ctx2hid) if mlp_bias: self.bias = nn.Parameter(torch.Tensor(self.mid_dim)) self.bias.data.zero_() else: self.register_parameter('bias', None) self.mlp = nn.Linear(self.mid_dim, 1, bias=False) def forward(self, hid, ctx, ctx_mask=None): """Computes attention probabilities and final context using decoder's hidden state and source annotations. Arguments: hid(Tensor): A set of decoder hidden states of shape `T*B*H` where `T` == 1, `B` is batch dim and `H` is hidden state dim. ctx(Tensor): A set of annotations of shape `S*B*C` where `S` is the source timestep dim, `B` is batch dim and `C` is annotation dim. ctx_mask(FloatTensor): A binary mask of shape `S*B` with zeroes in the padded positions. Returns: scores(Tensor): A tensor of shape `S*B` containing normalized attention scores for each position and sample. z_t(Tensor): A tensor of shape `B*H` containing the final attended context vector for this target decoding timestep. 
Notes: This will only work when `T==1` for now. """ inner_sum = self.ctx2ctx(ctx) + self.hid2ctx(hid) if self.bias is not None: inner_sum.add_(self.bias) scores = self.mlp(self.activ(inner_sum)).div(self.temperature).squeeze( -1) if ctx_mask is not None: scores.masked_fill_((1 - ctx_mask).bool(), -100000000.0) alpha = F.softmax(scores, dim=0) return alpha, self.ctx2hid((alpha.unsqueeze(-1) * ctx).sum(0)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ctx_dim': 4, 'hid_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F from torch import nn from typing import Optional import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr0 + (16 + x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (64 + x2), xmask) tmp7 = tl.load(in_ptr0 + (32 + x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (128 + x2), xmask) tmp11 = tl.load(in_ptr0 + (48 + x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (192 + x2), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](buf2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused_mul_sum_3[grid(64)](buf5, primals_2, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf7) return buf5, reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0 ), primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), buf2, buf5, reinterpret_tensor(buf6, (16, 4), (4, 1), 0 ), primals_6, primals_5 def get_activation_fn(name: 'Optional[str]'): """Returns a callable activation function from `torch`.""" if name in (None, 'linear'): return lambda x: x elif name in ('sigmoid', 'tanh'): return getattr(torch, name) else: return getattr(F, name) class DotAttention(nn.Module): """Attention layer with dot product.""" def __init__(self, ctx_dim, hid_dim, att_bottleneck='ctx', transform_ctx=True, att_activ='tanh', temp=1.0, ctx2hid=True, mlp_bias=None): super().__init__() self.ctx_dim = ctx_dim self.hid_dim = hid_dim self._ctx2hid = ctx2hid self.temperature = temp self.activ = get_activation_fn(att_activ) if isinstance(att_bottleneck, int): self.mid_dim = att_bottleneck else: self.mid_dim = getattr(self, '{}_dim'.format(att_bottleneck)) self.hid2ctx = nn.Linear(self.hid_dim, self.mid_dim, bias=False) if transform_ctx or self.mid_dim != self.ctx_dim: self.ctx2ctx = nn.Linear(self.ctx_dim, self.mid_dim, bias=False) else: self.ctx2ctx = lambda x: x if self._ctx2hid: self.ctx2hid = nn.Linear(self.ctx_dim, self.hid_dim, bias=False) else: self.ctx2hid = lambda x: x def forward(self, hid, ctx, ctx_mask=None): """Computes attention probabilities and final context using decoder's hidden state and source annotations. Arguments: hid(Tensor): A set of decoder hidden states of shape `T*B*H` where `T` == 1, `B` is batch dim and `H` is hidden state dim. ctx(Tensor): A set of annotations of shape `S*B*C` where `S` is the source timestep dim, `B` is batch dim and `C` is annotation dim. 
ctx_mask(FloatTensor): A binary mask of shape `S*B` with zeroes in the padded positions. Returns: scores(Tensor): A tensor of shape `S*B` containing normalized attention scores for each position and sample. z_t(Tensor): A tensor of shape `B*H` containing the final attended context vector for this target decoding timestep. Notes: This will only work when `T==1` for now. """ ctx_ = self.ctx2ctx(ctx) hid_ = self.hid2ctx(hid) scores = torch.bmm(hid_.permute(1, 0, 2), ctx_.permute(1, 2, 0)).div( self.temperature).squeeze(1).t() if ctx_mask is not None: scores.masked_fill_((1 - ctx_mask).bool(), -100000000.0) alpha = F.softmax(scores, dim=0) return alpha, self.ctx2hid((alpha.unsqueeze(-1) * ctx).sum(0)) class MLPAttentionNew(DotAttention): """Attention layer with feed-forward layer.""" def __init__(self, ctx_dim, hid_dim, att_bottleneck='ctx', transform_ctx=True, att_activ='tanh', mlp_bias=False, temp=1.0, ctx2hid=True): super().__init__(ctx_dim, hid_dim, att_bottleneck, transform_ctx, att_activ, temp, ctx2hid) if mlp_bias: self.bias = nn.Parameter(torch.Tensor(self.mid_dim)) self.bias.data.zero_() else: self.register_parameter('bias', None) self.mlp = nn.Linear(self.mid_dim, 1, bias=False) def forward(self, input_0, input_1): primals_1 = self.hid2ctx.weight primals_3 = self.ctx2ctx.weight primals_6 = self.ctx2hid.weight primals_5 = self.mlp.weight primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
Nickeilf/pysimt
MLPAttention
false
9,503
[ "MIT" ]
0
05c8de92d0e2b930e40939ad3695d8d2c2954dda
https://github.com/Nickeilf/pysimt/tree/05c8de92d0e2b930e40939ad3695d8d2c2954dda
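A hypothetical smoke test for the MLPAttention entry above, mirroring get_inputs() and get_init_inputs() from the row; the 4x4x4x4 tensors are the row's toy shapes rather than realistic T*B*H / S*B*C decoder inputs, and the compiled MLPAttentionNew variant would additionally require a CUDA device.

import torch

att = MLPAttention(ctx_dim=4, hid_dim=4)
hid = torch.rand(4, 4, 4, 4)
ctx = torch.rand(4, 4, 4, 4)
alpha, z_t = att(hid, ctx)        # attention weights and attended context
print(alpha.shape, z_t.shape)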
ECALayer
import torch import torch.nn as nn import torch.nn.parallel class ECALayer(nn.Module): """Constructs a ECA module. Args: channel: Number of channels of the input feature map k_size: Adaptive selection of kernel size """ def __init__(self, channel, k_size=3): super(ECALayer, self).__init__() self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1 ) // 2, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): _b, _c, _h, _w = x.size() y = torch.mean(x, dim=(2, 3), keepdim=True) y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2 ).unsqueeze(-1) y = self.sigmoid(y) return x * y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 1, 4 ), (4, 0, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4), (4, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_1[grid(256)](primals_1, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0), buf2 class ECALayerNew(nn.Module): """Constructs a ECA module. Args: channel: Number of channels of the input feature map k_size: Adaptive selection of kernel size """ def __init__(self, channel, k_size=3): super(ECALayerNew, self).__init__() self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1 ) // 2, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
SanderKlomp/channel-attention
ECALayer
false
9,504
[ "MIT" ]
0
9dfdb28f3ad4de13b4c076d1423f21c05c907bd7
https://github.com/SanderKlomp/channel-attention/tree/9dfdb28f3ad4de13b4c076d1423f21c05c907bd7
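A hypothetical usage sketch for the ECALayer entry above, following get_inputs() and get_init_inputs() from the row; the compiled ECALayerNew variant additionally requires a CUDA device.

import torch

eca = ECALayer(channel=4, k_size=3)
x = torch.rand(4, 4, 4, 4)        # (batch, channels, height, width)
y = eca(x)                        # channel-reweighted feature map, same shape as x
assert y.shape == x.shape
print(y.shape)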
ConvAE
import math import torch import torch.nn as nn import torch.nn.functional as F class Conv2dSamePad(nn.Module): """ Implement Tensorflow's 'SAME' padding mode in Conv2d. When an odd number, say `m`, of pixels are need to pad, Tensorflow will pad one more column at right or one more row at bottom. But Pytorch will pad `m+1` pixels, i.e., Pytorch always pads in both sides. So we can pad the tensor in the way of Tensorflow before call the Conv2d module. """ def __init__(self, kernel_size, stride): super(Conv2dSamePad, self).__init__() self.kernel_size = kernel_size if type(kernel_size) in [list, tuple ] else [kernel_size, kernel_size] self.stride = stride if type(stride) in [list, tuple] else [stride, stride] def forward(self, x): in_height = x.size(2) in_width = x.size(3) out_height = math.ceil(float(in_height) / float(self.stride[0])) out_width = math.ceil(float(in_width) / float(self.stride[1])) pad_along_height = (out_height - 1) * self.stride[0 ] + self.kernel_size[0] - in_height pad_along_width = (out_width - 1) * self.stride[1] + self.kernel_size[1 ] - in_width pad_top = math.floor(pad_along_height / 2) pad_left = math.floor(pad_along_width / 2) pad_bottom = pad_along_height - pad_top pad_right = pad_along_width - pad_left return F.pad(x, [pad_left, pad_right, pad_top, pad_bottom], 'constant', 0) class ConvTranspose2dSamePad(nn.Module): """ This module implements the "SAME" padding mode for ConvTranspose2d as in Tensorflow. A tensor with width w_in, feed it to ConvTranspose2d(ci, co, kernel, stride), the width of output tensor T_nopad: w_nopad = (w_in - 1) * stride + kernel If we use padding, i.e., ConvTranspose2d(ci, co, kernel, stride, padding, output_padding), the width of T_pad: w_pad = (w_in - 1) * stride + kernel - (2*padding - output_padding) = w_nopad - (2*padding - output_padding) Yes, in ConvTranspose2d, more padding, the resulting tensor is smaller, i.e., the padding is actually deleting row/col. If `pad`=(2*padding - output_padding) is odd, Pytorch deletes more columns in the left, i.e., the first ceil(pad/2) and last `pad - ceil(pad/2)` columns of T_nopad are deleted to get T_pad. In contrast, Tensorflow deletes more columns in the right, i.e., the first floor(pad/2) and last `pad - floor(pad/2)` columns are deleted. For the height, Pytorch deletes more rows at top, while Tensorflow at bottom. In practice, we usually want `w_pad = w_in * stride` or `w_pad = w_in * stride - 1`, i.e., the "SAME" padding mode in Tensorflow. To determine the value of `w_pad`, we should pass it to this function. So the number of columns to delete: pad = 2*padding - output_padding = w_nopad - w_pad If pad is even, we can directly set padding=pad/2 and output_padding=0 in ConvTranspose2d. If pad is odd, we can use ConvTranspose2d to get T_nopad, and then delete `pad` rows/columns by ourselves. This module should be called after the ConvTranspose2d module with shared kernel_size and stride values. 
""" def __init__(self, output_size): super(ConvTranspose2dSamePad, self).__init__() self.output_size = output_size def forward(self, x): in_height = x.size(2) in_width = x.size(3) pad_height = in_height - self.output_size[0] pad_width = in_width - self.output_size[1] pad_top = pad_height // 2 pad_bottom = pad_height - pad_top pad_left = pad_width // 2 pad_right = pad_width - pad_left return x[:, :, pad_top:in_height - pad_bottom, pad_left:in_width - pad_right] class ConvAE(nn.Module): def __init__(self, channels, kernels): """ :param channels: a list containing all channels including the input image channel (1 for gray, 3 for RGB) :param kernels: a list containing all kernel sizes, it should satisfy: len(kernels) = len(channels) - 1. """ super(ConvAE, self).__init__() assert isinstance(channels, list) and isinstance(kernels, list) self.encoder = nn.Sequential() for i in range(1, len(channels)): self.encoder.add_module('pad%d' % i, Conv2dSamePad(kernels[i - 1], 2)) self.encoder.add_module('conv%d' % i, nn.Conv2d(channels[i - 1], channels[i], kernel_size=kernels[i - 1], stride=2)) self.encoder.add_module('relu%d' % i, nn.ReLU(True)) self.decoder = nn.Sequential() channels = list(reversed(channels)) kernels = list(reversed(kernels)) sizes = [[12, 11], [24, 21], [48, 42]] for i in range(len(channels) - 1): self.decoder.add_module('deconv%d' % (i + 1), nn. ConvTranspose2d(channels[i], channels[i + 1], kernel_size= kernels[i], stride=2)) self.decoder.add_module('padd%d' % i, ConvTranspose2dSamePad( sizes[i])) self.decoder.add_module('relud%d' % i, nn.ReLU(True)) def forward(self, x): h = self.encoder(x) y = self.decoder(h) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': [4, 4], 'kernels': [4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x4 = xindex x2 = xindex // 36 % 4 tmp19 = tl.load(in_out_ptr0 + x4, xmask) tmp20 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp4 & tmp2 tmp6 = tl.load(in_out_ptr0 + x4, tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr0 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp5, tmp10, tmp11) tmp13 = tl.load(in_out_ptr0 + x4, tmp2 & xmask, other=0.0) tmp14 = tl.load(in_ptr0 + x2, tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.where(tmp4, tmp12, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp2, tmp16, tmp17) tmp21 = tmp19 + tmp20 tmp22 = tl.where(tmp2, tmp18, tmp21) tl.store(in_out_ptr0 + x4, tmp22, xmask) @triton.jit def triton_poi_fused_threshold_backward_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 3 x2 = xindex // 9 x3 = xindex tmp0 = tl.load(in_ptr0 + (21 + x0 + 6 * x1 + 36 * x2), xmask) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 6, 6), (144, 36, 6, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_2[grid(576)](buf4, primals_5, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool) triton_poi_fused_threshold_backward_3[grid(144)](buf4, buf5, 144, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(buf4, (4, 4, 3, 3), (144, 36, 6, 1), 21 ), primals_2, primals_4, buf0, buf2, buf5 class Conv2dSamePad(nn.Module): """ Implement Tensorflow's 'SAME' padding mode in Conv2d. When an odd number, say `m`, of pixels are need to pad, Tensorflow will pad one more column at right or one more row at bottom. But Pytorch will pad `m+1` pixels, i.e., Pytorch always pads in both sides. So we can pad the tensor in the way of Tensorflow before call the Conv2d module. """ def __init__(self, kernel_size, stride): super(Conv2dSamePad, self).__init__() self.kernel_size = kernel_size if type(kernel_size) in [list, tuple ] else [kernel_size, kernel_size] self.stride = stride if type(stride) in [list, tuple] else [stride, stride] def forward(self, x): in_height = x.size(2) in_width = x.size(3) out_height = math.ceil(float(in_height) / float(self.stride[0])) out_width = math.ceil(float(in_width) / float(self.stride[1])) pad_along_height = (out_height - 1) * self.stride[0 ] + self.kernel_size[0] - in_height pad_along_width = (out_width - 1) * self.stride[1] + self.kernel_size[1 ] - in_width pad_top = math.floor(pad_along_height / 2) pad_left = math.floor(pad_along_width / 2) pad_bottom = pad_along_height - pad_top pad_right = pad_along_width - pad_left return F.pad(x, [pad_left, pad_right, pad_top, pad_bottom], 'constant', 0) class ConvTranspose2dSamePad(nn.Module): """ This module implements the "SAME" padding mode for ConvTranspose2d as in Tensorflow. A tensor with width w_in, feed it to ConvTranspose2d(ci, co, kernel, stride), the width of output tensor T_nopad: w_nopad = (w_in - 1) * stride + kernel If we use padding, i.e., ConvTranspose2d(ci, co, kernel, stride, padding, output_padding), the width of T_pad: w_pad = (w_in - 1) * stride + kernel - (2*padding - output_padding) = w_nopad - (2*padding - output_padding) Yes, in ConvTranspose2d, more padding, the resulting tensor is smaller, i.e., the padding is actually deleting row/col. If `pad`=(2*padding - output_padding) is odd, Pytorch deletes more columns in the left, i.e., the first ceil(pad/2) and last `pad - ceil(pad/2)` columns of T_nopad are deleted to get T_pad. 
In contrast, Tensorflow deletes more columns in the right, i.e., the first floor(pad/2) and last `pad - floor(pad/2)` columns are deleted. For the height, Pytorch deletes more rows at top, while Tensorflow at bottom. In practice, we usually want `w_pad = w_in * stride` or `w_pad = w_in * stride - 1`, i.e., the "SAME" padding mode in Tensorflow. To determine the value of `w_pad`, we should pass it to this function. So the number of columns to delete: pad = 2*padding - output_padding = w_nopad - w_pad If pad is even, we can directly set padding=pad/2 and output_padding=0 in ConvTranspose2d. If pad is odd, we can use ConvTranspose2d to get T_nopad, and then delete `pad` rows/columns by ourselves. This module should be called after the ConvTranspose2d module with shared kernel_size and stride values. """ def __init__(self, output_size): super(ConvTranspose2dSamePad, self).__init__() self.output_size = output_size def forward(self, x): in_height = x.size(2) in_width = x.size(3) pad_height = in_height - self.output_size[0] pad_width = in_width - self.output_size[1] pad_top = pad_height // 2 pad_bottom = pad_height - pad_top pad_left = pad_width // 2 pad_right = pad_width - pad_left return x[:, :, pad_top:in_height - pad_bottom, pad_left:in_width - pad_right] class ConvAENew(nn.Module): def __init__(self, channels, kernels): """ :param channels: a list containing all channels including the input image channel (1 for gray, 3 for RGB) :param kernels: a list containing all kernel sizes, it should satisfy: len(kernels) = len(channels) - 1. """ super(ConvAENew, self).__init__() assert isinstance(channels, list) and isinstance(kernels, list) self.encoder = nn.Sequential() for i in range(1, len(channels)): self.encoder.add_module('pad%d' % i, Conv2dSamePad(kernels[i - 1], 2)) self.encoder.add_module('conv%d' % i, nn.Conv2d(channels[i - 1], channels[i], kernel_size=kernels[i - 1], stride=2)) self.encoder.add_module('relu%d' % i, nn.ReLU(True)) self.decoder = nn.Sequential() channels = list(reversed(channels)) kernels = list(reversed(kernels)) sizes = [[12, 11], [24, 21], [48, 42]] for i in range(len(channels) - 1): self.decoder.add_module('deconv%d' % (i + 1), nn. ConvTranspose2d(channels[i], channels[i + 1], kernel_size= kernels[i], stride=2)) self.decoder.add_module('padd%d' % i, ConvTranspose2dSamePad( sizes[i])) self.decoder.add_module('relud%d' % i, nn.ReLU(True)) def forward(self, input_0): primals_1 = self.encoder.conv1.weight primals_3 = self.encoder.conv1.bias primals_2 = self.decoder.deconv1.weight primals_5 = self.decoder.deconv1.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ShulingTang/DSC-Net
ConvAE
false
9,505
[ "MIT" ]
0
2da1e0c654b045057c654cbcbb8a8c23fb832c9d
https://github.com/ShulingTang/DSC-Net/tree/2da1e0c654b045057c654cbcbb8a8c23fb832c9d
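A hypothetical smoke test for the ConvAE entry above, mirroring get_inputs() and get_init_inputs() from the row. Note that with this toy channels=[4, 4] / kernels=[4, 4] configuration the decoder output is (4, 4, 3, 3), matching the shape returned by the compiled call() above, so the round trip does not reproduce the input shape.

import torch

ae = ConvAE(channels=[4, 4], kernels=[4, 4])
x = torch.rand(4, 4, 4, 4)
y = ae(x)
print(y.shape)   # expected torch.Size([4, 4, 3, 3]), as in the compiled call()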
DSCNet
import math import torch import torch.nn as nn import torch.nn.functional as F class Conv2dSamePad(nn.Module): """ Implement Tensorflow's 'SAME' padding mode in Conv2d. When an odd number, say `m`, of pixels are need to pad, Tensorflow will pad one more column at right or one more row at bottom. But Pytorch will pad `m+1` pixels, i.e., Pytorch always pads in both sides. So we can pad the tensor in the way of Tensorflow before call the Conv2d module. """ def __init__(self, kernel_size, stride): super(Conv2dSamePad, self).__init__() self.kernel_size = kernel_size if type(kernel_size) in [list, tuple ] else [kernel_size, kernel_size] self.stride = stride if type(stride) in [list, tuple] else [stride, stride] def forward(self, x): in_height = x.size(2) in_width = x.size(3) out_height = math.ceil(float(in_height) / float(self.stride[0])) out_width = math.ceil(float(in_width) / float(self.stride[1])) pad_along_height = (out_height - 1) * self.stride[0 ] + self.kernel_size[0] - in_height pad_along_width = (out_width - 1) * self.stride[1] + self.kernel_size[1 ] - in_width pad_top = math.floor(pad_along_height / 2) pad_left = math.floor(pad_along_width / 2) pad_bottom = pad_along_height - pad_top pad_right = pad_along_width - pad_left return F.pad(x, [pad_left, pad_right, pad_top, pad_bottom], 'constant', 0) class ConvTranspose2dSamePad(nn.Module): """ This module implements the "SAME" padding mode for ConvTranspose2d as in Tensorflow. A tensor with width w_in, feed it to ConvTranspose2d(ci, co, kernel, stride), the width of output tensor T_nopad: w_nopad = (w_in - 1) * stride + kernel If we use padding, i.e., ConvTranspose2d(ci, co, kernel, stride, padding, output_padding), the width of T_pad: w_pad = (w_in - 1) * stride + kernel - (2*padding - output_padding) = w_nopad - (2*padding - output_padding) Yes, in ConvTranspose2d, more padding, the resulting tensor is smaller, i.e., the padding is actually deleting row/col. If `pad`=(2*padding - output_padding) is odd, Pytorch deletes more columns in the left, i.e., the first ceil(pad/2) and last `pad - ceil(pad/2)` columns of T_nopad are deleted to get T_pad. In contrast, Tensorflow deletes more columns in the right, i.e., the first floor(pad/2) and last `pad - floor(pad/2)` columns are deleted. For the height, Pytorch deletes more rows at top, while Tensorflow at bottom. In practice, we usually want `w_pad = w_in * stride` or `w_pad = w_in * stride - 1`, i.e., the "SAME" padding mode in Tensorflow. To determine the value of `w_pad`, we should pass it to this function. So the number of columns to delete: pad = 2*padding - output_padding = w_nopad - w_pad If pad is even, we can directly set padding=pad/2 and output_padding=0 in ConvTranspose2d. If pad is odd, we can use ConvTranspose2d to get T_nopad, and then delete `pad` rows/columns by ourselves. This module should be called after the ConvTranspose2d module with shared kernel_size and stride values. 
""" def __init__(self, output_size): super(ConvTranspose2dSamePad, self).__init__() self.output_size = output_size def forward(self, x): in_height = x.size(2) in_width = x.size(3) pad_height = in_height - self.output_size[0] pad_width = in_width - self.output_size[1] pad_top = pad_height // 2 pad_bottom = pad_height - pad_top pad_left = pad_width // 2 pad_right = pad_width - pad_left return x[:, :, pad_top:in_height - pad_bottom, pad_left:in_width - pad_right] class ConvAE(nn.Module): def __init__(self, channels, kernels): """ :param channels: a list containing all channels including the input image channel (1 for gray, 3 for RGB) :param kernels: a list containing all kernel sizes, it should satisfy: len(kernels) = len(channels) - 1. """ super(ConvAE, self).__init__() assert isinstance(channels, list) and isinstance(kernels, list) self.encoder = nn.Sequential() for i in range(1, len(channels)): self.encoder.add_module('pad%d' % i, Conv2dSamePad(kernels[i - 1], 2)) self.encoder.add_module('conv%d' % i, nn.Conv2d(channels[i - 1], channels[i], kernel_size=kernels[i - 1], stride=2)) self.encoder.add_module('relu%d' % i, nn.ReLU(True)) self.decoder = nn.Sequential() channels = list(reversed(channels)) kernels = list(reversed(kernels)) sizes = [[12, 11], [24, 21], [48, 42]] for i in range(len(channels) - 1): self.decoder.add_module('deconv%d' % (i + 1), nn. ConvTranspose2d(channels[i], channels[i + 1], kernel_size= kernels[i], stride=2)) self.decoder.add_module('padd%d' % i, ConvTranspose2dSamePad( sizes[i])) self.decoder.add_module('relud%d' % i, nn.ReLU(True)) def forward(self, x): h = self.encoder(x) y = self.decoder(h) return y class SelfExpression(nn.Module): def __init__(self, n): super(SelfExpression, self).__init__() self.Coefficient = nn.Parameter(0.0001 * torch.ones(n, n, dtype= torch.float32), requires_grad=True) def forward(self, x): y = torch.matmul(self.Coefficient, x) return y class DSCNet(nn.Module): def __init__(self, channels, kernels, num_sample): super(DSCNet, self).__init__() self.n = num_sample self.ae = ConvAE(channels, kernels) self.self_expression = SelfExpression(self.n) def forward(self, x): z = self.ae.encoder(x) shape = z.shape z = z.view(self.n, -1) z_recon = self.self_expression(z) z_recon_reshape = z_recon.view(shape) x_recon = self.ae.decoder(z_recon_reshape) return x_recon, z, z_recon def loss_fn(self, x, x_recon, z, z_recon, weight_coef, weight_selfExp): loss_ae = 0.5 * F.mse_loss(x_recon, x, reduction='sum') loss_coef = torch.sum(torch.pow(self.self_expression.Coefficient, 2)) loss_selfExp = 0.5 * F.mse_loss(z_recon, z, reduction='sum') loss = (loss_ae + weight_coef * loss_coef + weight_selfExp * loss_selfExp) loss /= x.size(0) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': [4, 4], 'kernels': [4, 4], 'num_sample': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x4 = xindex x2 = xindex // 36 % 4 tmp19 = tl.load(in_out_ptr0 + x4, xmask) tmp20 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp4 & tmp2 tmp6 = tl.load(in_out_ptr0 + x4, tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr0 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp5, tmp10, tmp11) tmp13 = tl.load(in_out_ptr0 + x4, tmp2 & xmask, other=0.0) tmp14 = tl.load(in_ptr0 + x2, tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.where(tmp4, tmp12, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp2, tmp16, tmp17) tmp21 = tmp19 + tmp20 tmp22 = tl.where(tmp2, tmp18, tmp21) tl.store(in_out_ptr0 + x4, tmp22, xmask) @triton.jit def triton_poi_fused_threshold_backward_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 3 x2 = xindex // 9 x3 = xindex tmp0 = tl.load(in_ptr0 + (21 + x0 + 6 * x1 + 36 * x2), xmask) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 
4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1 del buf1 buf7 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(64)](buf2, primals_3, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(buf2, (4, 16), (16, 1), 0), out=buf3) buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (4, 4, 2, 2), (16, 4, 2, 1), 0), primals_5, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups =1, bias=None) assert_size_stride(buf4, (4, 4, 6, 6), (144, 36, 6, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(576)](buf5, primals_6, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool) triton_poi_fused_threshold_backward_3[grid(144)](buf5, buf6, 144, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(buf5, (4, 4, 3, 3), (144, 36, 6, 1), 21 ), reinterpret_tensor(buf2, (4, 16), (16, 1), 0 ), buf3, primals_2, primals_5, buf0, reinterpret_tensor(buf3, (4, 4, 2, 2), (16, 4, 2, 1), 0), buf6, reinterpret_tensor(primals_4, (4, 4 ), (1, 4), 0), reinterpret_tensor(buf2, (16, 4), (1, 16), 0), buf7 class Conv2dSamePad(nn.Module): """ Implement Tensorflow's 'SAME' padding mode in Conv2d. When an odd number, say `m`, of pixels are need to pad, Tensorflow will pad one more column at right or one more row at bottom. But Pytorch will pad `m+1` pixels, i.e., Pytorch always pads in both sides. So we can pad the tensor in the way of Tensorflow before call the Conv2d module. """ def __init__(self, kernel_size, stride): super(Conv2dSamePad, self).__init__() self.kernel_size = kernel_size if type(kernel_size) in [list, tuple ] else [kernel_size, kernel_size] self.stride = stride if type(stride) in [list, tuple] else [stride, stride] def forward(self, x): in_height = x.size(2) in_width = x.size(3) out_height = math.ceil(float(in_height) / float(self.stride[0])) out_width = math.ceil(float(in_width) / float(self.stride[1])) pad_along_height = (out_height - 1) * self.stride[0 ] + self.kernel_size[0] - in_height pad_along_width = (out_width - 1) * self.stride[1] + self.kernel_size[1 ] - in_width pad_top = math.floor(pad_along_height / 2) pad_left = math.floor(pad_along_width / 2) pad_bottom = pad_along_height - pad_top pad_right = pad_along_width - pad_left return F.pad(x, [pad_left, pad_right, pad_top, pad_bottom], 'constant', 0) class ConvTranspose2dSamePad(nn.Module): """ This module implements the "SAME" padding mode for ConvTranspose2d as in Tensorflow. 
A tensor with width w_in, feed it to ConvTranspose2d(ci, co, kernel, stride), the width of output tensor T_nopad: w_nopad = (w_in - 1) * stride + kernel If we use padding, i.e., ConvTranspose2d(ci, co, kernel, stride, padding, output_padding), the width of T_pad: w_pad = (w_in - 1) * stride + kernel - (2*padding - output_padding) = w_nopad - (2*padding - output_padding) Yes, in ConvTranspose2d, more padding, the resulting tensor is smaller, i.e., the padding is actually deleting row/col. If `pad`=(2*padding - output_padding) is odd, Pytorch deletes more columns in the left, i.e., the first ceil(pad/2) and last `pad - ceil(pad/2)` columns of T_nopad are deleted to get T_pad. In contrast, Tensorflow deletes more columns in the right, i.e., the first floor(pad/2) and last `pad - floor(pad/2)` columns are deleted. For the height, Pytorch deletes more rows at top, while Tensorflow at bottom. In practice, we usually want `w_pad = w_in * stride` or `w_pad = w_in * stride - 1`, i.e., the "SAME" padding mode in Tensorflow. To determine the value of `w_pad`, we should pass it to this function. So the number of columns to delete: pad = 2*padding - output_padding = w_nopad - w_pad If pad is even, we can directly set padding=pad/2 and output_padding=0 in ConvTranspose2d. If pad is odd, we can use ConvTranspose2d to get T_nopad, and then delete `pad` rows/columns by ourselves. This module should be called after the ConvTranspose2d module with shared kernel_size and stride values. """ def __init__(self, output_size): super(ConvTranspose2dSamePad, self).__init__() self.output_size = output_size def forward(self, x): in_height = x.size(2) in_width = x.size(3) pad_height = in_height - self.output_size[0] pad_width = in_width - self.output_size[1] pad_top = pad_height // 2 pad_bottom = pad_height - pad_top pad_left = pad_width // 2 pad_right = pad_width - pad_left return x[:, :, pad_top:in_height - pad_bottom, pad_left:in_width - pad_right] class ConvAE(nn.Module): def __init__(self, channels, kernels): """ :param channels: a list containing all channels including the input image channel (1 for gray, 3 for RGB) :param kernels: a list containing all kernel sizes, it should satisfy: len(kernels) = len(channels) - 1. """ super(ConvAE, self).__init__() assert isinstance(channels, list) and isinstance(kernels, list) self.encoder = nn.Sequential() for i in range(1, len(channels)): self.encoder.add_module('pad%d' % i, Conv2dSamePad(kernels[i - 1], 2)) self.encoder.add_module('conv%d' % i, nn.Conv2d(channels[i - 1], channels[i], kernel_size=kernels[i - 1], stride=2)) self.encoder.add_module('relu%d' % i, nn.ReLU(True)) self.decoder = nn.Sequential() channels = list(reversed(channels)) kernels = list(reversed(kernels)) sizes = [[12, 11], [24, 21], [48, 42]] for i in range(len(channels) - 1): self.decoder.add_module('deconv%d' % (i + 1), nn. 
ConvTranspose2d(channels[i], channels[i + 1], kernel_size= kernels[i], stride=2)) self.decoder.add_module('padd%d' % i, ConvTranspose2dSamePad( sizes[i])) self.decoder.add_module('relud%d' % i, nn.ReLU(True)) def forward(self, x): h = self.encoder(x) y = self.decoder(h) return y class SelfExpression(nn.Module): def __init__(self, n): super(SelfExpression, self).__init__() self.Coefficient = nn.Parameter(0.0001 * torch.ones(n, n, dtype= torch.float32), requires_grad=True) def forward(self, x): y = torch.matmul(self.Coefficient, x) return y class DSCNetNew(nn.Module): def __init__(self, channels, kernels, num_sample): super(DSCNetNew, self).__init__() self.n = num_sample self.ae = ConvAE(channels, kernels) self.self_expression = SelfExpression(self.n) def loss_fn(self, x, x_recon, z, z_recon, weight_coef, weight_selfExp): loss_ae = 0.5 * F.mse_loss(x_recon, x, reduction='sum') loss_coef = torch.sum(torch.pow(self.self_expression.Coefficient, 2)) loss_selfExp = 0.5 * F.mse_loss(z_recon, z, reduction='sum') loss = (loss_ae + weight_coef * loss_coef + weight_selfExp * loss_selfExp) loss /= x.size(0) return loss def forward(self, input_0): primals_1 = self.ae.encoder.conv1.weight primals_3 = self.ae.encoder.conv1.bias primals_2 = self.ae.decoder.deconv1.weight primals_6 = self.ae.decoder.deconv1.bias primals_4 = self.self_expression.Coefficient primals_5 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1], output[2]
ShulingTang/DSC-Net
DSCNet
false
9,506
[ "MIT" ]
0
2da1e0c654b045057c654cbcbb8a8c23fb832c9d
https://github.com/ShulingTang/DSC-Net/tree/2da1e0c654b045057c654cbcbb8a8c23fb832c9d
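A hypothetical forward-pass sketch for the DSCNet entry above, using the toy configuration from get_init_inputs(); with this configuration the reconstruction comes back as (4, 4, 3, 3) (see the compiled call() above), so it does not match the (4, 4, 4, 4) input shape-for-shape, and loss_fn is therefore not exercised here.

import torch

net = DSCNet(channels=[4, 4], kernels=[4, 4], num_sample=4)
x = torch.rand(4, 4, 4, 4)
x_recon, z, z_recon = net(x)
print(x_recon.shape, z.shape, z_recon.shape)   # (4, 4, 3, 3), (4, 16), (4, 16)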
MultiHeadedAttention
import math import torch from typing import Tuple from torch import nn class MultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'torch.Tensor'=torch.ones((0, 0, 0), dtype= torch.bool)) ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2), (0, 0, 0) means fake mask. Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask.size(2) > 0: mask = mask.unsqueeze(1).eq(0) mask = mask[:, :, :, :scores.size(-1)] scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'torch.Tensor'=torch.ones((0, 0, 0), dtype= torch.bool), pos_emb: 'torch.Tensor'=torch.empty(0), cache: 'torch.Tensor'=torch.zeros((0, 0, 0, 0))) ->Tuple[torch.Tensor, torch.Tensor]: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). 1.When applying cross attention between decoder and encoder, the batch padding mask for input is in (#batch, 1, T) shape. 2.When applying self attention of encoder, the mask is in (#batch, T, T) shape. 3.When applying self attention of decoder, the mask is in (#batch, L, L) shape. 
4.If the different position in decoder see different block of the encoder, such as Mocha, the passed in mask could be in (#batch, L, T) shape. But there is no such case in current Wenet. cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2), where `cache_t == chunk_size * num_decoding_left_chunks` and `head * d_k == size` Returns: torch.Tensor: Output tensor (#batch, time1, d_model). torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2) where `cache_t == chunk_size * num_decoding_left_chunks` and `head * d_k == size` """ q, k, v = self.forward_qkv(query, key, value) if cache.size(0) > 0: key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1) k = torch.cat([key_cache, k], dim=2) v = torch.cat([value_cache, v], dim=2) new_cache = torch.cat((k, v), dim=-1) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask), new_cache def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from typing import Tuple from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 2 x0 = xindex % 4 x2 = xindex // 8 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask) @triton.jit def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp9 / tmp13 tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((4, 4, 16, 2), (128, 1, 8, 4), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf1, buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 16)](buf0, primals_3, buf4, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf5 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0) del buf0 triton_poi_fused_clone_2[grid(16, 16)](buf1, buf5, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 16), (16, 0, 1), 0), out=buf6) buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_per_fused__softmax_3[grid(256)](buf6, buf9, 256, 16, XBLOCK= 128, num_warps=8, num_stages=1) del buf6 buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf1 triton_poi_fused_clone_2[grid(16, 16)](buf2, buf10, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0) del buf11 
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_11 return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0 ), buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 16, 1), (16, 1, 16), 0) class MultiHeadedAttentionNew(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'torch.Tensor'=torch.ones((0, 0, 0), dtype= torch.bool)) ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2), (0, 0, 0) means fake mask. Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). 
""" n_batch = value.size(0) if mask.size(2) > 0: mask = mask.unsqueeze(1).eq(0) mask = mask[:, :, :, :scores.size(-1)] scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, input_0, input_1, input_2): primals_2 = self.linear_q.weight primals_3 = self.linear_q.bias primals_4 = self.linear_k.weight primals_5 = self.linear_k.bias primals_7 = self.linear_v.weight primals_8 = self.linear_v.bias primals_10 = self.linear_out.weight primals_11 = self.linear_out.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
Slyne/wenet
MultiHeadedAttention
false
9,507
[ "Apache-2.0" ]
0
de74d8acf40f47a3c503bff5cf4ed6808a9dad14
https://github.com/Slyne/wenet/tree/de74d8acf40f47a3c503bff5cf4ed6808a9dad14
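The streaming-cache handling visible in the forward above packs the key and value projections into one tensor along the last dimension. Below is a small illustrative sketch of that layout using hypothetical sizes (head=4, cache_t=8, d_k=16, a chunk of 3 new frames) that are not taken from the entry itself; it only restates the torch.split / torch.cat round trip shown in the original code.

import torch

head, cache_t, d_k = 4, 8, 16                        # hypothetical sizes for illustration
chunk = 3                                            # hypothetical number of new frames
cache = torch.rand(1, head, cache_t, d_k * 2)        # packed K|V cache
key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)
k_new = torch.rand(1, head, chunk, d_k)
v_new = torch.rand(1, head, chunk, d_k)
k = torch.cat([key_cache, k_new], dim=2)             # grow along the time axis
v = torch.cat([value_cache, v_new], dim=2)
new_cache = torch.cat((k, v), dim=-1)                # repack K|V for the next chunk
assert new_cache.shape == (1, head, cache_t + chunk, d_k * 2)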
ZeroModule
import torch import torch as th from torch import nn import torch.random class ZeroModule(nn.Module): """Module that always returns zeros of same shape as input.""" def __init__(self, features_dim: 'int'): """Builds ZeroModule.""" super().__init__() self.features_dim = features_dim def forward(self, x: 'th.Tensor') ->th.Tensor: """Returns zeros of same shape as `x`.""" assert x.shape[1:] == (self.features_dim,) return th.zeros_like(x) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'features_dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.random assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_zeros_like_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_zeros_like_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf0, class ZeroModuleNew(nn.Module): """Module that always returns zeros of same shape as input.""" def __init__(self, features_dim: 'int'): """Builds ZeroModule.""" super().__init__() self.features_dim = features_dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
TaoHuang13/imitation
ZeroModule
false
9,508
[ "MIT" ]
0
f979be0fa05106754f6d1e5a98495d0fedbea598
https://github.com/TaoHuang13/imitation/tree/f979be0fa05106754f6d1e5a98495d0fedbea598
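A minimal check of this pair, assuming the two classes above are in scope and a CUDA device is available (the generated call() allocates its output with empty_strided_cuda, so it cannot run on CPU):

import torch

x = torch.rand([4, 4], device='cuda')
ref = ZeroModule(features_dim=4)(x)       # eager path: th.zeros_like(x)
opt = ZeroModuleNew(features_dim=4)(x)    # Triton kernel writes zeros into a fresh buffer
assert torch.equal(ref, opt) and opt.shape == x.shape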
MaxPoolStride1
import torch import torch.nn as nn import torch.nn.functional as F class MaxPoolStride1(nn.Module): def __init__(self, kernel_size): super(MaxPoolStride1, self).__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, x): padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode='replicate') pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x) return pooled_x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp5 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * ( 1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * ( 1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * (1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp13 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1 ) * (1 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp21 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1 ) * (2 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = 
triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x4, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxPoolStride1New(nn.Module): def __init__(self, kernel_size): super(MaxPoolStride1New, self).__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
TCC-MonitoramentoInteligente/dev-tool
MaxPoolStride1
false
9,509
[ "MIT" ]
0
d3a1d697c4ba7a5fff54be08541da4fc4811ab5e
https://github.com/TCC-MonitoramentoInteligente/dev-tool/tree/d3a1d697c4ba7a5fff54be08541da4fc4811ab5e
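Note that nn.MaxPool2d's second positional argument is the stride, so the pooling above runs with stride kernel_size - 1; despite the class name, it is only stride 1 when kernel_size == 2. A short comparison sketch, assuming both classes are in scope and CUDA is available; with kernel_size=4 and the 4x4 input from get_inputs() both versions produce a 2x2 map.

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
ref = MaxPoolStride1(kernel_size=4)(x)     # replicate-pad to 7x7, then 4x4 max pool with stride 3
opt = MaxPoolStride1New(kernel_size=4)(x)  # fused Triton kernel with the padding folded into the indexing
assert ref.shape == opt.shape == (4, 4, 2, 2)
assert torch.allclose(ref, opt)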
NetVLAD
import torch import torch.nn as nn import torch.nn.functional as F class NetVLAD(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=64, dim=128, alpha=100.0, normalize_input=True): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors alpha : float Parameter of initialization. Larger value is harder assignment. normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. """ super(NetVLAD, self).__init__() self.num_clusters = num_clusters self.dim = dim self.alpha = alpha self.normalize_input = normalize_input self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True) self.centroids = nn.Parameter(torch.rand(num_clusters, dim)) self._init_params() def _init_params(self): self.conv.weight = nn.Parameter((2.0 * self.alpha * self.centroids) .unsqueeze(-1).unsqueeze(-1)) self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1)) def forward(self, x): N, C = x.shape[:2] if self.normalize_input: x = F.normalize(x, p=2, dim=1) soft_assign = self.conv(x).view(N, self.num_clusters, -1) soft_assign = F.softmax(soft_assign, dim=1) x_flatten = x.view(N, C, -1) residual = x_flatten.expand(self.num_clusters, -1, -1, -1).permute( 1, 0, 2, 3) - self.centroids.expand(x_flatten.size(-1), -1, -1 ).permute(1, 2, 0).unsqueeze(0) residual *= soft_assign.unsqueeze(2) vlad = residual.sum(dim=-1) vlad = F.normalize(vlad, p=2, dim=2) vlad = vlad.view(x.size(0), -1) vlad = F.normalize(vlad, p=2, dim=1) return vlad def get_inputs(): return [torch.rand([4, 128, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = xindex // 4096 _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 512 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y1 = yindex // 128 y0 = yindex % 128 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y1), ymask, eviction_policy= 'evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tl.store(out_ptr0 + (y0 + 128 * x2 + 524288 * y1), tmp5, ymask) @triton.jit def triton_per_fused__softmax_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 64 * x0), None) tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = triton_helpers.max2(tmp3, 1)[:, None] tmp6 = tmp2 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp2, None) tl.store(out_ptr0 + x0, tmp5, None) tl.store(out_ptr1 + x0, tmp10, None) @triton.jit def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl. 
constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 128 x2 = xindex // 8192 x4 = xindex % 8192 tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last') x1 = xindex // 128 % 64 _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x5 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * r3 + 524288 * x2), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr2 + (x1 + 64 * r3 + 262144 * x2), rmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r3 + 4096 * x2), rmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r3 + 4096 * x2), rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask, tmp12, _tmp11) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + x5, tmp11, None) @triton.jit def triton_per_fused_linalg_vector_norm_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_red_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 4 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) tmp7 = tl.sum(_tmp7, 1)[:, None] tmp9 = libdevice.sqrt(tmp7) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp10 / tmp13 tmp15 = triton_helpers.maximum(tmp9, tmp12) tmp16 = tmp14 / tmp15 tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 128, 1, 1), 
(128, 1, 1, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 128), (128, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) get_raw_stream(0) triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0, 16384, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128), torch.float32) triton_poi_fused_div_1[grid(512, 4096)](primals_1, buf0, buf1, 512, 4096, XBLOCK=16, YBLOCK=256, num_warps=8, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf3 = buf2 del buf2 buf4 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) triton_per_fused__softmax_convolution_2[grid(16384)](buf3, primals_3, buf4, buf5, 16384, 64, XBLOCK=32, num_warps=8, num_stages=1) del primals_3 buf6 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32) triton_red_fused_mul_sub_sum_3[grid(32768)](buf1, primals_4, buf3, buf4, buf5, buf6, 32768, 4096, XBLOCK=8, RBLOCK=256, num_warps= 16, num_stages=1) buf7 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32) buf8 = reinterpret_tensor(buf7, (4, 64, 1), (64, 1, 1), 0) del buf7 triton_per_fused_linalg_vector_norm_4[grid(256)](buf8, buf6, 256, 128, XBLOCK=8, num_warps=8, num_stages=1) buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf10 = reinterpret_tensor(buf9, (4, 1), (1, 1), 0) del buf9 buf11 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32) triton_red_fused_div_linalg_vector_norm_5[grid(4)](buf10, buf6, buf8, buf11, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) return (buf11, primals_2, primals_4, buf1, buf3, buf4, buf5, buf6, buf8, buf10) class NetVLADNew(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=64, dim=128, alpha=100.0, normalize_input=True): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors alpha : float Parameter of initialization. Larger value is harder assignment. normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. """ super(NetVLADNew, self).__init__() self.num_clusters = num_clusters self.dim = dim self.alpha = alpha self.normalize_input = normalize_input self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True) self.centroids = nn.Parameter(torch.rand(num_clusters, dim)) self._init_params() def _init_params(self): self.conv.weight = nn.Parameter((2.0 * self.alpha * self.centroids) .unsqueeze(-1).unsqueeze(-1)) self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1)) def forward(self, input_0): primals_4 = self.centroids primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Shubodh/NetVLAD-pytorch
NetVLAD
false
9,510
[ "MIT" ]
0
ea45bac16dbb3e3bec4172df58715bf3526ee502
https://github.com/Shubodh/NetVLAD-pytorch/tree/ea45bac16dbb3e3bec4172df58715bf3526ee502
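A hedged comparison sketch for this pair, assuming CUDA and enough memory for the 4x128x64x64 input from get_inputs(). Both constructors draw their own random centroids, so the parameters are copied across before comparing; the fused kernels reorder floating-point reductions, so the sketch prints the deviation instead of asserting bitwise equality.

import torch

torch.manual_seed(0)
ref_m = NetVLAD().cuda()
opt_m = NetVLADNew().cuda()
opt_m.load_state_dict(ref_m.state_dict())    # share centroids and the derived conv weights/bias
x = torch.rand([4, 128, 64, 64], device='cuda')
print((ref_m(x) - opt_m(x)).abs().max())     # expected to be on the order of float rounding error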
TemporalDecay
import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter class TemporalDecay(nn.Module): def __init__(self, input_size, rnn_hid_size): super(TemporalDecay, self).__init__() self.rnn_hid_size = rnn_hid_size self.build(input_size) def build(self, input_size): self.W = Parameter(torch.Tensor(self.rnn_hid_size, input_size)) self.b = Parameter(torch.Tensor(self.rnn_hid_size)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.W.size(0)) self.W.data.uniform_(-stdv, stdv) if self.b is not None: self.b.data.uniform_(-stdv, stdv) def forward(self, d): gamma = F.relu(F.linear(d, self.W, self.b)) gamma = torch.exp(-gamma) return gamma def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'rnn_hid_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_exp_neg_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = -tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = 0.0 tmp8 = tmp4 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_exp_neg_relu_threshold_backward_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf2 class TemporalDecayNew(nn.Module): def __init__(self, input_size, rnn_hid_size): super(TemporalDecayNew, self).__init__() self.rnn_hid_size = rnn_hid_size self.build(input_size) def build(self, input_size): self.W = Parameter(torch.Tensor(self.rnn_hid_size, input_size)) self.b = Parameter(torch.Tensor(self.rnn_hid_size)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.W.size(0)) self.W.data.uniform_(-stdv, stdv) if self.b is not None: self.b.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_1 = self.W primals_2 = self.b primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Sobhan1996/BRITS-master
TemporalDecay
false
9,511
[ "MIT" ]
0
66726ec104dad43c6d8367b0c9ef8f19daf65f0e
https://github.com/Sobhan1996/BRITS-master/tree/66726ec104dad43c6d8367b0c9ef8f19daf65f0e
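TemporalDecay maps a tensor of time gaps d to gamma = exp(-relu(W d + b)), so every output entry lies in (0, 1]. A small usage sketch, assuming the class above is in scope and CUDA is available:

import torch

m = TemporalDecayNew(input_size=4, rnn_hid_size=4).cuda()
d = torch.rand([4, 4, 4, 4], device='cuda')      # the shape used by get_inputs()
gamma = m(d)
assert gamma.shape == (4, 4, 4, 4)
assert (gamma > 0).all() and (gamma <= 1).all()  # exp of a non-positive number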
GCN2
import math import torch import torch.nn as nn import torch.nn.functional as F from scipy.sparse import * def dropout(x, drop_prob, shared_axes=[], training=False): """ Apply dropout to input tensor. Parameters ---------- input_tensor: ``torch.FloatTensor`` A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` Returns ------- output: ``torch.FloatTensor`` A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied. """ if drop_prob == 0 or drop_prob is None or not training: return x sz = list(x.size()) for i in shared_axes: sz[i] = 1 mask = x.new(*sz).bernoulli_(1.0 - drop_prob).div_(1.0 - drop_prob) mask = mask.expand_as(x) return x * mask class GraphConvolution(nn.Module): def __init__(self, in_features, out_features, with_bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features) ) if with_bias: self.bias = nn.Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def init_params(self): for param in self.parameters(): if len(param.size()) == 2: nn.init.xavier_uniform_(param) else: nn.init.constant_(param, 0.0) def forward(self, input, adj): """ Graph Convolutional Layer forward function """ if input.data.is_sparse: support = torch.spmm(input, self.weight) else: support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN2(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout=0.5, with_bias=True): super(GCN2, self).__init__() self.nfeat = nfeat self.hidden_sizes = [nhid] self.nclass = nclass self.gc1 = GraphConvolution(nfeat, nhid, with_bias=with_bias) self.gc2 = GraphConvolution(nhid, nclass, with_bias=with_bias) self.dropout = dropout self.with_bias = with_bias def forward(self, x, adj): x1 = F.relu(self.gc1(x, adj)) x1 = F.dropout(x1, self.dropout, training=self.training) x2 = self.gc2(x1, adj) return x1, x2 def initialize(self): """Initialize parameters of GCN. """ self.gc1.reset_parameters() self.gc2.reset_parameters() def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn from scipy.sparse import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_3, buf3, alpha=1, beta=1, out=buf4) del buf3 del primals_6 return buf2, buf4, buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) def dropout(x, drop_prob, shared_axes=[], training=False): """ Apply dropout to input tensor. Parameters ---------- input_tensor: ``torch.FloatTensor`` A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` Returns ------- output: ``torch.FloatTensor`` A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied. 
""" if drop_prob == 0 or drop_prob is None or not training: return x sz = list(x.size()) for i in shared_axes: sz[i] = 1 mask = x.new(*sz).bernoulli_(1.0 - drop_prob).div_(1.0 - drop_prob) mask = mask.expand_as(x) return x * mask class GraphConvolution(nn.Module): def __init__(self, in_features, out_features, with_bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features) ) if with_bias: self.bias = nn.Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def init_params(self): for param in self.parameters(): if len(param.size()) == 2: nn.init.xavier_uniform_(param) else: nn.init.constant_(param, 0.0) def forward(self, input, adj): """ Graph Convolutional Layer forward function """ if input.data.is_sparse: support = torch.spmm(input, self.weight) else: support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN2New(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout=0.5, with_bias=True): super(GCN2New, self).__init__() self.nfeat = nfeat self.hidden_sizes = [nhid] self.nclass = nclass self.gc1 = GraphConvolution(nfeat, nhid, with_bias=with_bias) self.gc2 = GraphConvolution(nhid, nclass, with_bias=with_bias) self.dropout = dropout self.with_bias = with_bias def initialize(self): """Initialize parameters of GCN. """ self.gc1.reset_parameters() self.gc2.reset_parameters() def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_2 = self.gc2.weight primals_6 = self.gc2.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
Ononoki-Yotsugi/IDGL
GCN2
false
9,512
[ "Apache-2.0" ]
0
a99f840681a4ae26c2740ed9e9302d4e15a68c7f
https://github.com/Ononoki-Yotsugi/IDGL/tree/a99f840681a4ae26c2740ed9e9302d4e15a68c7f
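Because every weight and both inputs of GCN2 are 4x4 in this configuration, the shape asserts in call() cannot distinguish the operands, so a numerical cross-check of GCN2New against the eager module is worthwhile. A sketch assuming CUDA; the weights are copied across and both modules are put in eval mode, where F.dropout is the identity and therefore matches the generated graph, which contains no dropout. In the eager graph the first mm is support = x @ gc1.weight and the second is adj @ support; if the printed deviations are not near zero, the primals_* assignments in GCN2New.forward should be checked against that operand order.

import torch

torch.manual_seed(0)
ref_m = GCN2(nfeat=4, nhid=4, nclass=4).cuda().eval()
opt_m = GCN2New(nfeat=4, nhid=4, nclass=4).cuda().eval()
opt_m.load_state_dict(ref_m.state_dict())
x = torch.rand([4, 4], device='cuda')
adj = torch.rand([4, 4], device='cuda')
x1_ref, x2_ref = ref_m(x, adj)
x1_opt, x2_opt = opt_m(x, adj)
print((x1_ref - x1_opt).abs().max(), (x2_ref - x2_opt).abs().max())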
QNet
import torch import torch.nn as nn import torch.nn.functional as F class QNet(nn.Module): def __init__(self, input_dim, output_dim): super(QNet, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.fc1 = nn.Linear(input_dim, 64) self.fc2 = nn.Linear(64, 64) self.fc3 = nn.Linear(64, 64) self.out = nn.Linear(64, output_dim) def forward(self, x): x = self.fc1(x) x = F.tanh(self.fc2(x)) x = F.tanh(self.fc3(x)) actions_value = self.out(x) return actions_value def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64), (64, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (4, 64), (64, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf2, primals_5, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf3 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 64), (1, 64), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf3 triton_poi_fused_tanh_0[grid(4096)](buf4, primals_7, 4096, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_9 return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf2, buf4, primals_8, primals_6, primals_4 class QNetNew(nn.Module): def __init__(self, input_dim, output_dim): super(QNetNew, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.fc1 = nn.Linear(input_dim, 64) self.fc2 = nn.Linear(64, 64) self.fc3 = nn.Linear(64, 64) self.out = nn.Linear(64, output_dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.out.weight primals_9 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, 
primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
SunlightWarrior/q_learning
QNet
false
9,513
[ "MIT" ]
0
3c5f0c700fbe84ca4859165513123f404c44937f
https://github.com/SunlightWarrior/q_learning/tree/3c5f0c700fbe84ca4859165513123f404c44937f
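An equivalence sketch for QNet / QNetNew, assuming CUDA. QNet applies no activation after fc1 and contains no dropout, so once the weights are shared the two versions should agree up to floating-point rounding on the get_inputs() tensor.

import torch

torch.manual_seed(0)
ref_m = QNet(input_dim=4, output_dim=4).cuda()
opt_m = QNetNew(input_dim=4, output_dim=4).cuda()
opt_m.load_state_dict(ref_m.state_dict())
x = torch.rand([4, 4, 4, 4], device='cuda')
assert torch.allclose(ref_m(x), opt_m(x), atol=1e-5)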
TransformerEncoderLayer
import torch import torch.nn as nn class MultiHeadAttention(nn.Module): """Multi-Head Attention module.""" def __init__(self, n_head=8, d_model=512, d_k=64, d_v=64, dropout=0.1, qkv_bias=False, mask_value=0): super().__init__() self.mask_value = mask_value self.n_head = n_head self.d_k = d_k self.d_v = d_v self.scale = d_k ** -0.5 self.dim_k = n_head * d_k self.dim_v = n_head * d_v self.linear_q = nn.Linear(self.dim_k, self.dim_k, bias=qkv_bias) self.linear_k = nn.Linear(self.dim_k, self.dim_k, bias=qkv_bias) self.linear_v = nn.Linear(self.dim_v, self.dim_v, bias=qkv_bias) self.fc = nn.Linear(self.dim_v, d_model, bias=qkv_bias) self.attn_drop = nn.Dropout(dropout) self.proj_drop = nn.Dropout(dropout) def forward(self, q, k, v, mask=None): batch_size, len_q, _ = q.size() _, len_k, _ = k.size() q = self.linear_q(q).view(batch_size, len_q, self.n_head, self.d_k) k = self.linear_k(k).view(batch_size, len_k, self.n_head, self.d_k) v = self.linear_v(v).view(batch_size, len_k, self.n_head, self.d_v) q = q.permute(0, 2, 1, 3) k = k.permute(0, 2, 3, 1) v = v.permute(0, 2, 1, 3) logits = torch.matmul(q, k) * self.scale if mask is not None: if mask.dim() == 3: mask = mask.unsqueeze(1) elif mask.dim() == 2: mask = mask.unsqueeze(1).unsqueeze(1) logits = logits.masked_fill(mask == self.mask_value, float('-inf')) weights = logits.softmax(dim=-1) weights = self.attn_drop(weights) attn_out = torch.matmul(weights, v).transpose(1, 2) attn_out = attn_out.reshape(batch_size, len_q, self.dim_v) attn_out = self.fc(attn_out) attn_out = self.proj_drop(attn_out) return attn_out class PositionwiseFeedForward(nn.Module): """A two-feed-forward-layer module.""" def __init__(self, d_in, d_hid, dropout=0.1, act_layer=nn.GELU): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.act = act_layer() self.dropout = nn.Dropout(dropout) def forward(self, x): x = self.w_1(x) x = self.act(x) x = self.dropout(x) x = self.w_2(x) x = self.dropout(x) return x class TransformerEncoderLayer(nn.Module): """""" def __init__(self, d_model=512, d_inner=256, n_head=8, d_k=64, d_v=64, dropout=0.1, qkv_bias=False, mask_value=0, act_layer=nn.GELU): super().__init__() self.norm1 = nn.LayerNorm(d_model) self.attn = MultiHeadAttention(n_head, d_model, d_k, d_v, qkv_bias= qkv_bias, dropout=dropout, mask_value=mask_value) self.norm2 = nn.LayerNorm(d_model) self.mlp = PositionwiseFeedForward(d_model, d_inner, dropout= dropout, act_layer=act_layer) def forward(self, x, mask=None): residual = x x = self.norm1(x) x = residual + self.attn(x, x, x, mask) residual = x x = self.norm2(x) x = residual + self.mlp(x) return x def get_inputs(): return [torch.rand([4, 4, 512])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None) tmp21 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 * tmp18 tmp22 = tmp20 * tmp21 tmp24 = tmp22 + tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 512 * x0), tmp24, None) tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 4 x2 = xindex // 256 % 8 x3 = xindex // 2048 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2 + 512 * x1 + 2048 * x3), None) tl.store(out_ptr0 + x4, tmp0, None) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 2048 * y1), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.125 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 8 x2 = xindex // 512 % 4 x3 = xindex // 2048 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2 + 256 * x1 + 2048 * x3), None) tl.store(out_ptr0 + x4, tmp0, None) @triton.jit def triton_per_fused_add_native_layer_norm_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.load(in_ptr1 + (r1 + 512 * x0), None) tmp23 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 512, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 512.0 tmp17 = tmp15 / tmp16 tmp18 = 
1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tmp21 = tmp2 - tmp10 tmp22 = tmp21 * tmp20 tmp24 = tmp22 * tmp23 tmp26 = tmp24 + tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, None) tl.store(out_ptr1 + (r1 + 512 * x0), tmp26, None) tl.store(out_ptr0 + x0, tmp10, None) @triton.jit def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x2, None) tmp3 = tl.load(in_out_ptr0 + x2, None) tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 512), (2048, 512, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (512, 512), (512, 1)) assert_size_stride(primals_5, (512, 512), (512, 1)) assert_size_stride(primals_6, (512, 512), (512, 1)) assert_size_stride(primals_7, (512, 512), (512, 1)) assert_size_stride(primals_8, (512,), (1,)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (256, 512), (512, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (512, 256), (256, 1)) assert_size_stride(primals_13, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 4, 512), (2048, 512, 1), torch.float32) get_raw_stream(0) triton_per_fused_native_layer_norm_0[grid(16)](buf3, primals_1, primals_2, primals_3, buf0, buf4, 16, 512, num_warps=4, num_stages=1) del primals_2 del primals_3 buf5 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 512), (1, 512), 0), out=buf5) buf6 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 512), (512, 1), 0), reinterpret_tensor(primals_5, (512, 512), (1, 512), 0), out=buf6) buf7 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 512), (1, 512), 0), out=buf7) buf8 = empty_strided_cuda((4, 8, 4, 64), (2048, 256, 64, 1), torch. 
float32) triton_poi_fused_clone_1[grid(8192)](buf5, buf8, 8192, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 8, 64, 4), (2048, 256, 4, 1), 0) del buf5 triton_poi_fused_clone_2[grid(2048, 4)](buf6, buf9, 2048, 4, XBLOCK =4, YBLOCK=256, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (32, 4, 64), (256, 64, 1), 0), reinterpret_tensor(buf9, (32, 64, 4), (256, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused__softmax_3[grid(512)](buf10, buf11, 512, XBLOCK= 256, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 8, 4, 4), (128, 16, 4, 1), 0) del buf10 triton_poi_fused__softmax_4[grid(512)](buf11, buf12, 512, XBLOCK= 128, num_warps=4, num_stages=1) del buf11 buf13 = reinterpret_tensor(buf6, (4, 8, 4, 64), (2048, 256, 64, 1), 0) del buf6 triton_poi_fused_clone_1[grid(8192)](buf7, buf13, 8192, XBLOCK=256, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf7, (32, 4, 64), (256, 64, 1), 0) del buf7 extern_kernels.bmm(reinterpret_tensor(buf12, (32, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf13, (32, 4, 64), (256, 64, 1), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 8, 64), (2048, 512, 64, 1), torch .float32) triton_poi_fused_clone_5[grid(8192)](buf14, buf15, 8192, XBLOCK=256, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf14, (16, 512), (512, 1), 0) del buf14 extern_kernels.mm(reinterpret_tensor(buf15, (16, 512), (512, 1), 0), reinterpret_tensor(primals_7, (512, 512), (1, 512), 0), out=buf16) buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf20 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 1), 0) del buf18 buf21 = empty_strided_cuda((4, 4, 512), (2048, 512, 1), torch.float32) triton_per_fused_add_native_layer_norm_6[grid(16)](buf20, primals_1, buf16, primals_8, primals_9, buf17, buf21, 16, 512, num_warps=4, num_stages=1) del primals_9 buf22 = empty_strided_cuda((16, 256), (256, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf21, (16, 512 ), (512, 1), 0), reinterpret_tensor(primals_10, (512, 256), (1, 512), 0), alpha=1, beta=1, out=buf22) del primals_11 buf23 = empty_strided_cuda((4, 4, 256), (1024, 256, 1), torch.float32) triton_poi_fused_gelu_7[grid(4096)](buf22, buf23, 4096, XBLOCK=256, num_warps=4, num_stages=1) buf24 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf23, (16, 256), (256, 1), 0), reinterpret_tensor(primals_12, (256, 512), (1, 256), 0), out=buf24) buf25 = reinterpret_tensor(buf24, (4, 4, 512), (2048, 512, 1), 0) del buf24 triton_poi_fused_add_8[grid(8192)](buf25, primals_1, buf16, primals_13, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 return buf25, primals_1, primals_8, buf0, buf3, reinterpret_tensor(buf4, (16, 512), (512, 1), 0), buf12, reinterpret_tensor(buf15, (16, 512), (512, 1), 0), buf16, buf17, buf20, reinterpret_tensor(buf21, (16, 512), (512, 1), 0), buf22, reinterpret_tensor(buf23, (16, 256), ( 256, 1), 0), primals_12, primals_10, primals_7, reinterpret_tensor( buf13, (32, 64, 4), (256, 1, 64), 0), reinterpret_tensor(buf8, (32, 64, 4), (256, 1, 64), 0), reinterpret_tensor(buf9, (32, 4, 64), ( 256, 1, 4), 0), primals_6, primals_5, primals_4 class MultiHeadAttention(nn.Module): """Multi-Head Attention module.""" def __init__(self, n_head=8, d_model=512, d_k=64, d_v=64, dropout=0.1, 
qkv_bias=False, mask_value=0): super().__init__() self.mask_value = mask_value self.n_head = n_head self.d_k = d_k self.d_v = d_v self.scale = d_k ** -0.5 self.dim_k = n_head * d_k self.dim_v = n_head * d_v self.linear_q = nn.Linear(self.dim_k, self.dim_k, bias=qkv_bias) self.linear_k = nn.Linear(self.dim_k, self.dim_k, bias=qkv_bias) self.linear_v = nn.Linear(self.dim_v, self.dim_v, bias=qkv_bias) self.fc = nn.Linear(self.dim_v, d_model, bias=qkv_bias) self.attn_drop = nn.Dropout(dropout) self.proj_drop = nn.Dropout(dropout) def forward(self, q, k, v, mask=None): batch_size, len_q, _ = q.size() _, len_k, _ = k.size() q = self.linear_q(q).view(batch_size, len_q, self.n_head, self.d_k) k = self.linear_k(k).view(batch_size, len_k, self.n_head, self.d_k) v = self.linear_v(v).view(batch_size, len_k, self.n_head, self.d_v) q = q.permute(0, 2, 1, 3) k = k.permute(0, 2, 3, 1) v = v.permute(0, 2, 1, 3) logits = torch.matmul(q, k) * self.scale if mask is not None: if mask.dim() == 3: mask = mask.unsqueeze(1) elif mask.dim() == 2: mask = mask.unsqueeze(1).unsqueeze(1) logits = logits.masked_fill(mask == self.mask_value, float('-inf')) weights = logits.softmax(dim=-1) weights = self.attn_drop(weights) attn_out = torch.matmul(weights, v).transpose(1, 2) attn_out = attn_out.reshape(batch_size, len_q, self.dim_v) attn_out = self.fc(attn_out) attn_out = self.proj_drop(attn_out) return attn_out class PositionwiseFeedForward(nn.Module): """A two-feed-forward-layer module.""" def __init__(self, d_in, d_hid, dropout=0.1, act_layer=nn.GELU): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.act = act_layer() self.dropout = nn.Dropout(dropout) def forward(self, x): x = self.w_1(x) x = self.act(x) x = self.dropout(x) x = self.w_2(x) x = self.dropout(x) return x class TransformerEncoderLayerNew(nn.Module): """""" def __init__(self, d_model=512, d_inner=256, n_head=8, d_k=64, d_v=64, dropout=0.1, qkv_bias=False, mask_value=0, act_layer=nn.GELU): super().__init__() self.norm1 = nn.LayerNorm(d_model) self.attn = MultiHeadAttention(n_head, d_model, d_k, d_v, qkv_bias= qkv_bias, dropout=dropout, mask_value=mask_value) self.norm2 = nn.LayerNorm(d_model) self.mlp = PositionwiseFeedForward(d_model, d_inner, dropout= dropout, act_layer=act_layer) def forward(self, input_0): primals_2 = self.norm1.weight primals_3 = self.norm1.bias primals_4 = self.attn.linear_q.weight primals_5 = self.attn.linear_k.weight primals_6 = self.attn.linear_v.weight primals_7 = self.attn.fc.weight primals_8 = self.norm2.weight primals_9 = self.norm2.bias primals_10 = self.mlp.w_1.weight primals_11 = self.mlp.w_1.bias primals_12 = self.mlp.w_2.weight primals_13 = self.mlp.w_2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
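A minimal smoke-test sketch for the compiled TransformerEncoderLayerNew above. It assumes a CUDA device and that the classes in this record live in one importable module; the (4, 4, 512) input shape mirrors the assert_size_stride guards inside call().

import torch

layer = TransformerEncoderLayerNew().cuda().eval()  # defaults: d_model=512, d_inner=256, n_head=8
x = torch.rand(4, 4, 512, device='cuda')            # (batch, seq_len, d_model), as asserted in call()
with torch.no_grad():
    y = layer(x)
print(y.shape)  # expected: torch.Size([4, 4, 512])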
SamDM/mmocr
TransformerEncoderLayer
false
9,514
[ "Apache-2.0" ]
0
4cb69141ff8d28c8b1437bf28242e368a0e6ec4f
https://github.com/SamDM/mmocr/tree/4cb69141ff8d28c8b1437bf28242e368a0e6ec4f
CrossAttentionSublayer
import math import torch from torch import nn import torch.optim class ScaledDotAttention(torch.nn.Module): def __init__(self, model_dim, n_heads, dropout=0.0): """ Creates a ScaledDotAttention. :param model_dim: The model dimensions. :param n_heads: The number of heads. :param dropout: The dropout value. Default 0.0. """ super().__init__() self.model_dim = model_dim self.n_heads = n_heads self.lin_k = torch.nn.Linear(self.model_dim, self.model_dim, bias=False ) self.lin_q = torch.nn.Linear(self.model_dim, self.model_dim, bias=False ) self.lin_v = torch.nn.Linear(self.model_dim, self.model_dim, bias=False ) self.dropout = torch.nn.Dropout(dropout) self.lin_o = torch.nn.Linear(self.model_dim, self.model_dim, bias=False ) self.head_dim = self.model_dim // self.n_heads self.scale = math.sqrt(self.head_dim) def forward(self, inputs): """Scaled dot-product attention forward-pass :param inputs: dictionary with query, key, value and mask tensors the shape of the tensors are (tstep, bsize, dim) except for the mask which is (bsize, query_len, key_len) :return: the output from the forward pass, the attention weights """ q, k, v, mask = inputs q_len, _, _ = q.shape query, key, value = self._project_and_reshape(q, k, v) attn_weights = self._compute_attn_weights(query, key, mask) attn_probs = self.dropout(attn_weights) scores = torch.matmul(attn_probs, value) out = self.lin_o(self._view_as_concat(scores, q_len)) return out, attn_weights def _project_and_reshape(self, q, k, v): """ Projects the q, k and v and reshapes it into size (bsize, n_heads, q|k|v_len, head_dim). :param q: q of shape (q_len, b_size, model_dim) :param k: k of shape (k_len, b_size, model_dim) :param v: v of shape (v_len, b_size, model_dim) :return: The query, key, value of shape (b_size, n_heads, q|k|v_len, head_dim). """ query = self._view_as_headed(self.lin_q(q)) key = self._view_as_headed(self.lin_k(k)) value = self._view_as_headed(self.lin_v(v)) return query, key, value def _compute_attn_weights(self, query, key, mask): """ Computes the normalized attention scores. :param query: The query of shape (b_size, n_heads, q_len, head_dim). :param key: The key of shape (b_size, n_heads, k_len, head_dim). :param mask: The value of shape (b_size, _, k_len). :return: The normalized attention scores of shape (b_size, n_heads, q_len, k_len). """ attn = torch.matmul(query.div(self.scale), key.transpose(-2, -1)) attn = self._apply_mask(mask, attn) return attn.softmax(dim=-1) def _view_as_headed(self, data): """ Reshapes the data into a head format. :param data: (seq_len, b_size, model_dim) :return: (b_size, n_heads, seq_len, head_dim). """ return data.view(data.shape[0], data.shape[1], self.n_heads, -1 ).permute(1, 2, 0, 3) def _view_as_concat(self, data, q_len): return data.permute(2, 0, 1, 3).contiguous().view(q_len, -1, self. model_dim) @staticmethod def _apply_mask(mask, attn): if mask is not None: mask = mask.unsqueeze(1) attn.masked_fill_(mask, -100000000.0) return attn class BaseSublayer(nn.Module): def __init__(self, model_dim, dropout=0.1, is_pre_norm=False): """ Creates a BaseSublayer. :param model_dim: The model dimension. :param dropout: The dropout layer. :param is_pre_norm: Whether it should use pre_norm transformer layers. Default: False. 
""" super().__init__() self.is_pre_norm = is_pre_norm self.layer_norm = nn.LayerNorm(model_dim, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, **kwargs): raise NotImplementedError('BaseSublayer does not implement forward.') def apply_pre_norm_if_needed(self, x): """ Applies pre_norm to the input if needed. If pre_norm is false, the input remains unchanged. :param x: The input. :return: The output. """ if self.is_pre_norm: x = self.layer_norm(x) return x def apply_post_norm_if_needed(self, x): """ Applies post_norm to the input if needed. If pre_norm is true, the input remains unchanged. :param x: The input. :return: The output. """ if not self.is_pre_norm: x = self.layer_norm(x) return x def apply_residual(self, residual, x): """ Applies the residual connection. :param residual: The residual. :param x: The input x. :return: The output of the residual connection. """ return residual + self.dropout(x) class CrossAttentionSublayer(BaseSublayer): def __init__(self, model_dim, n_heads, dropout=0.1, attn_dropout=0.0, is_pre_norm=False): """ Creates a CrossAttentionSublayer. :param model_dim: The model dimension. :param n_heads: The number of attention heads. :param dropout: The dropout rate for the residual connection. :param is_pre_norm: Whether the layer type is pre_norm. Default: True. """ super().__init__(model_dim, dropout, is_pre_norm) self.attn = ScaledDotAttention(model_dim, n_heads, attn_dropout) def forward(self, query, key, value, mask=None, **kwargs): """ Performs a forward pass over the CrossAttentionSublayer. :param query: The query. For encoder-decoder attention, it is the output from the previous decoder layer. :param key: The key. For encoder-decoder attention, it is the output from the encoder. :param value: The mask. For encoder-decoder attention, it is the output from the encoder. :param mask: The mask. For encoder-decoder attention, it is the encoder mask. :return: The output of the CrossAttentionSublayer. """ residual = query query = self.apply_pre_norm_if_needed(query) attn_out, attn_weights = self.attn((query, key, value, mask)) out = self.apply_residual(residual, attn_out) out = self.apply_post_norm_if_needed(out) return out, attn_weights def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'model_dim': 4, 'n_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x1 + 4 * y0), tmp2, xmask & ymask) tl.store(out_ptr1 + (y0 + 16 * x1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf13 = empty_strided_cuda((16, 1, 4), (1, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_div_transpose_0[grid(16, 4)](buf0, buf3, buf13, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf1, (16, 1, 4), (1, 0, 16), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 0), 0), out=buf7) buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_3[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(16)](primals_1, buf9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(64)](primals_1, buf9, buf10, buf11, primals_8, primals_9, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf11 del primals_9 return buf12, buf6, primals_1, primals_8, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), buf9, primals_7, reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0 ), buf13, reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0) class ScaledDotAttention(torch.nn.Module): def __init__(self, model_dim, n_heads, dropout=0.0): """ Creates a ScaledDotAttention. :param model_dim: The model dimensions. :param n_heads: The number of heads. :param dropout: The dropout value. Default 0.0. 
""" super().__init__() self.model_dim = model_dim self.n_heads = n_heads self.lin_k = torch.nn.Linear(self.model_dim, self.model_dim, bias=False ) self.lin_q = torch.nn.Linear(self.model_dim, self.model_dim, bias=False ) self.lin_v = torch.nn.Linear(self.model_dim, self.model_dim, bias=False ) self.dropout = torch.nn.Dropout(dropout) self.lin_o = torch.nn.Linear(self.model_dim, self.model_dim, bias=False ) self.head_dim = self.model_dim // self.n_heads self.scale = math.sqrt(self.head_dim) def forward(self, inputs): """Scaled dot-product attention forward-pass :param inputs: dictionary with query, key, value and mask tensors the shape of the tensors are (tstep, bsize, dim) except for the mask which is (bsize, query_len, key_len) :return: the output from the forward pass, the attention weights """ q, k, v, mask = inputs q_len, _, _ = q.shape query, key, value = self._project_and_reshape(q, k, v) attn_weights = self._compute_attn_weights(query, key, mask) attn_probs = self.dropout(attn_weights) scores = torch.matmul(attn_probs, value) out = self.lin_o(self._view_as_concat(scores, q_len)) return out, attn_weights def _project_and_reshape(self, q, k, v): """ Projects the q, k and v and reshapes it into size (bsize, n_heads, q|k|v_len, head_dim). :param q: q of shape (q_len, b_size, model_dim) :param k: k of shape (k_len, b_size, model_dim) :param v: v of shape (v_len, b_size, model_dim) :return: The query, key, value of shape (b_size, n_heads, q|k|v_len, head_dim). """ query = self._view_as_headed(self.lin_q(q)) key = self._view_as_headed(self.lin_k(k)) value = self._view_as_headed(self.lin_v(v)) return query, key, value def _compute_attn_weights(self, query, key, mask): """ Computes the normalized attention scores. :param query: The query of shape (b_size, n_heads, q_len, head_dim). :param key: The key of shape (b_size, n_heads, k_len, head_dim). :param mask: The value of shape (b_size, _, k_len). :return: The normalized attention scores of shape (b_size, n_heads, q_len, k_len). """ attn = torch.matmul(query.div(self.scale), key.transpose(-2, -1)) attn = self._apply_mask(mask, attn) return attn.softmax(dim=-1) def _view_as_headed(self, data): """ Reshapes the data into a head format. :param data: (seq_len, b_size, model_dim) :return: (b_size, n_heads, seq_len, head_dim). """ return data.view(data.shape[0], data.shape[1], self.n_heads, -1 ).permute(1, 2, 0, 3) def _view_as_concat(self, data, q_len): return data.permute(2, 0, 1, 3).contiguous().view(q_len, -1, self. model_dim) @staticmethod def _apply_mask(mask, attn): if mask is not None: mask = mask.unsqueeze(1) attn.masked_fill_(mask, -100000000.0) return attn class BaseSublayer(nn.Module): def __init__(self, model_dim, dropout=0.1, is_pre_norm=False): """ Creates a BaseSublayer. :param model_dim: The model dimension. :param dropout: The dropout layer. :param is_pre_norm: Whether it should use pre_norm transformer layers. Default: False. """ super().__init__() self.is_pre_norm = is_pre_norm self.layer_norm = nn.LayerNorm(model_dim, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, **kwargs): raise NotImplementedError('BaseSublayer does not implement forward.') def apply_pre_norm_if_needed(self, x): """ Applies pre_norm to the input if needed. If pre_norm is false, the input remains unchanged. :param x: The input. :return: The output. """ if self.is_pre_norm: x = self.layer_norm(x) return x def apply_post_norm_if_needed(self, x): """ Applies post_norm to the input if needed. If pre_norm is true, the input remains unchanged. 
:param x: The input. :return: The output. """ if not self.is_pre_norm: x = self.layer_norm(x) return x def apply_residual(self, residual, x): """ Applies the residual connection. :param residual: The residual. :param x: The input x. :return: The output of the residual connection. """ return residual + self.dropout(x) class CrossAttentionSublayerNew(BaseSublayer): def __init__(self, model_dim, n_heads, dropout=0.1, attn_dropout=0.0, is_pre_norm=False): """ Creates a CrossAttentionSublayer. :param model_dim: The model dimension. :param n_heads: The number of attention heads. :param dropout: The dropout rate for the residual connection. :param is_pre_norm: Whether the layer type is pre_norm. Default: True. """ super().__init__(model_dim, dropout, is_pre_norm) self.attn = ScaledDotAttention(model_dim, n_heads, attn_dropout) def forward(self, input_0, input_1, input_2): primals_8 = self.layer_norm.weight primals_9 = self.layer_norm.bias primals_4 = self.attn.lin_k.weight primals_5 = self.attn.lin_q.weight primals_6 = self.attn.lin_v.weight primals_7 = self.attn.lin_o.weight primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
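A sketch of a parity check between the reference CrossAttentionSublayer and the Triton-backed CrossAttentionSublayerNew above. It assumes a CUDA device, that both classes are importable from this record, and that the chosen tolerance is adequate; the source does not state any guaranteed tolerance.

import torch

torch.manual_seed(0)
ref = CrossAttentionSublayer(model_dim=4, n_heads=4).cuda().eval()
opt = CrossAttentionSublayerNew(model_dim=4, n_heads=4).cuda().eval()
opt.load_state_dict(ref.state_dict())  # identical parameter names, so weights can be shared

q = torch.rand(4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    out_ref, attn_ref = ref(q, k, v)
    out_opt, attn_opt = opt(q, k, v)
print(torch.allclose(out_ref, out_opt, atol=1e-5))
print(torch.allclose(attn_ref, attn_opt, atol=1e-5))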
Nickeilf/pysimt
CrossAttentionSublayer
false
9,515
[ "MIT" ]
0
05c8de92d0e2b930e40939ad3695d8d2c2954dda
https://github.com/Nickeilf/pysimt/tree/05c8de92d0e2b930e40939ad3695d8d2c2954dda
Net
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 16 * 16, 8000)
        self.fc2 = nn.Linear(8000, 500)
        self.fc3 = nn.Linear(500, 2)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(-1, 64 * 16 * 16)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        x = torch.sigmoid(self.fc3(x))
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, 
tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 8000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 500 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_sigmoid_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (8000, 16384), (16384, 1)) assert_size_stride(primals_9, (8000,), (1,)) assert_size_stride(primals_10, (500, 8000), (8000, 1)) assert_size_stride(primals_11, (500,), (1,)) assert_size_stride(primals_12, (2, 500), (500, 1)) assert_size_stride(primals_13, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2, buf3, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6, buf7, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 16, 16), (16384, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(65536)](buf9, primals_7, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8) buf11 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch. 
float32) triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf9, buf10, buf11, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((1, 8000), (8000, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (1, 16384), (0, 1), 0), reinterpret_tensor(primals_8, (16384, 8000), (1, 16384), 0), out=buf12) buf13 = buf12 del buf12 triton_poi_fused_relu_6[grid(8000)](buf13, primals_9, 8000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((1, 500), (500, 1), torch.float32) extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (8000, 500), (1, 8000), 0), out=buf14) buf15 = buf14 del buf14 triton_poi_fused_relu_7[grid(500)](buf15, primals_11, 500, XBLOCK= 256, num_warps=4, num_stages=1) del primals_11 buf16 = empty_strided_cuda((1, 2), (2, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_12, (500, 2), ( 1, 500), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_sigmoid_8[grid(2)](buf17, primals_13, 2, XBLOCK=2, num_warps=1, num_stages=1) del primals_13 return (buf17, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (1, 16384), (16384, 1), 0), buf13, buf15, buf17, primals_12, primals_10, primals_8) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(3, 16, 3, padding=1) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(64 * 16 * 16, 8000) self.fc2 = nn.Linear(8000, 500) self.fc3 = nn.Linear(500, 2) self.dropout = nn.Dropout(0.25) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_12 = self.fc3.weight primals_13 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
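A smoke-test sketch for NetNew above, assuming a CUDA device and that the class is importable from this record. Note that the reference Net flattens with x.view(-1, 64 * 16 * 16) although a 64x64 input pooled three times yields 8x8 feature maps, so a (4, 3, 64, 64) batch holds exactly 4 * 64 * 8 * 8 = 16384 values and collapses into a single row; the compiled call() mirrors this and returns a (1, 2) output.

import torch

net = NetNew().cuda().eval()
x = torch.rand(4, 3, 64, 64, device='cuda')
with torch.no_grad():
    y = net(x)
print(y.shape)  # expected: torch.Size([1, 2]); the batch dimension collapses in the view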
LSaldyt/laser-dog
Net
false
9,516
[ "MIT" ]
0
168c8bfea95dcd27a499f00f191232d67ae63c1c
https://github.com/LSaldyt/laser-dog/tree/168c8bfea95dcd27a499f00f191232d67ae63c1c
Net
import torch
import torch.nn as nn


class Net(nn.Module):

    def __init__(self, input_d):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_d, int(input_d / 2))

    def forward(self, x):
        x = torch.sigmoid(self.fc1(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_d': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (2, 4), (4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(128)](buf1, primals_2, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1


class NetNew(nn.Module):

    def __init__(self, input_d):
        super(NetNew, self).__init__()
        self.fc1 = nn.Linear(input_d, int(input_d / 2))

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
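A parity-check sketch for this record, assuming a CUDA device, that Net and NetNew above are importable together, and that the chosen tolerance is adequate.

import torch

torch.manual_seed(0)
ref = Net(input_d=4).cuda().eval()
opt = NetNew(input_d=4).cuda().eval()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    y_ref = ref(x)  # sigmoid(fc1(x)), shape (4, 4, 4, 2)
    y_opt = opt(x)
print(torch.allclose(y_ref, y_opt, atol=1e-6))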
Tenoke/models
Net
false
9,517
[ "Apache-2.0" ]
0
84baffe34509d2f8b61689e043db2130fec8c171
https://github.com/Tenoke/models/tree/84baffe34509d2f8b61689e043db2130fec8c171
GAT
import torch
import torch.nn as nn
import torch.nn.functional as F


class GATLayer(nn.Module):

    def __init__(self, input_feature, output_feature, dropout, alpha,
        concat=True):
        super(GATLayer, self).__init__()
        self.input_feature = input_feature
        self.output_feature = output_feature
        self.alpha = alpha
        self.dropout = dropout
        self.concat = concat
        self.a = nn.Parameter(torch.empty(size=(2 * output_feature, 1)))
        self.w = nn.Parameter(torch.empty(size=(input_feature, output_feature)))
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.w.data, gain=1.414)
        nn.init.xavier_uniform_(self.a.data, gain=1.414)

    def forward(self, h, adj):
        Wh = torch.mm(h, self.w)
        e = self._prepare_attentional_mechanism_input(Wh)
        zero_vec = -9000000000000000.0 * torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.mm(attention, Wh)
        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime

    def _prepare_attentional_mechanism_input(self, Wh):
        Wh1 = torch.matmul(Wh, self.a[:self.output_feature, :])
        Wh2 = torch.matmul(Wh, self.a[self.output_feature:, :])
        e = Wh1 + Wh2.T
        return self.leakyrelu(e)


class GAT(nn.Module):

    def __init__(self, input_size, hidden_size, output_size, dropout,
        alpha, nheads, concat=True):
        super(GAT, self).__init__()
        self.dropout = dropout
        self.attention = [GATLayer(input_size, hidden_size, dropout=dropout,
            alpha=alpha, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attention):
            self.add_module('attention_{}'.format(i), attention)
        self.out_att = GATLayer(hidden_size * nheads, output_size, dropout=
            dropout, alpha=alpha, concat=False)

    def forward(self, x, adj):
        x = torch.cat([att(x, adj) for att in self.attention], dim=1)
        x = F.elu(self.out_att(x, adj))
        return F.log_softmax(x, dim=1)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4,
        'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
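A CPU sketch of the reference GAT above with the get_inputs()/get_init_inputs() shapes (4 nodes, 4 features, 4 heads); eval() disables the dropout applied to the attention weights. The binary adjacency matrix is an illustrative choice, not part of the original record.

import torch

torch.manual_seed(0)
model = GAT(input_size=4, hidden_size=4, output_size=4, dropout=0.5,
    alpha=4, nheads=4).eval()
x = torch.rand(4, 4)                    # node features
adj = (torch.rand(4, 4) > 0.5).float()  # dense 0/1 adjacency; entries > 0 keep their attention scores
with torch.no_grad():
    out = model(x, adj)
print(out.shape)  # torch.Size([4, 4]); each row holds log-softmax scores for one node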
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = tl.load(in_ptr3 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr3 + 1) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp22 = tl.load(in_ptr3 + 2) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp31 = tl.load(in_ptr3 + 3) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp38 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp39 = tl.load(in_ptr5 + x0, xmask) tmp40 = tl.load(in_ptr6 + 0) tmp41 = tl.broadcast_to(tmp40, [XBLOCK]) tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp47 = tl.load(in_ptr6 + 1) tmp48 = tl.broadcast_to(tmp47, [XBLOCK]) tmp54 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp55 = tl.load(in_ptr6 + 2) tmp56 = tl.broadcast_to(tmp55, [XBLOCK]) tmp62 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp63 = tl.load(in_ptr6 + 3) tmp64 = tl.broadcast_to(tmp63, [XBLOCK]) tmp70 = tl.load(in_ptr7 + 4 * x0, xmask, 
eviction_policy='evict_last').to( tl.int1) tmp71 = tl.load(in_ptr8 + x0, xmask) tmp72 = tl.load(in_ptr9 + 0) tmp73 = tl.broadcast_to(tmp72, [XBLOCK]) tmp78 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp79 = tl.load(in_ptr9 + 1) tmp80 = tl.broadcast_to(tmp79, [XBLOCK]) tmp86 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp87 = tl.load(in_ptr9 + 2) tmp88 = tl.broadcast_to(tmp87, [XBLOCK]) tmp94 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp95 = tl.load(in_ptr9 + 3) tmp96 = tl.broadcast_to(tmp95, [XBLOCK]) tmp102 = tl.load(in_ptr10 + 4 * x0, xmask, eviction_policy='evict_last' ).to(tl.int1) tmp103 = tl.load(in_ptr11 + x0, xmask) tmp104 = tl.load(in_ptr12 + 0) tmp105 = tl.broadcast_to(tmp104, [XBLOCK]) tmp110 = tl.load(in_ptr10 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp111 = tl.load(in_ptr12 + 1) tmp112 = tl.broadcast_to(tmp111, [XBLOCK]) tmp118 = tl.load(in_ptr10 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp119 = tl.load(in_ptr12 + 2) tmp120 = tl.broadcast_to(tmp119, [XBLOCK]) tmp126 = tl.load(in_ptr10 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp127 = tl.load(in_ptr12 + 3) tmp128 = tl.broadcast_to(tmp127, [XBLOCK]) tmp5 = tmp2 + tmp4 tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp8 = tl.where(tmp1, tmp5, tmp7) tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp0, tmp8, tmp9) tmp15 = tmp2 + tmp14 tmp16 = tmp15 * tmp6 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = tl.where(tmp11, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp24 = tmp2 + tmp23 tmp25 = tmp24 * tmp6 tmp26 = tl.where(tmp21, tmp24, tmp25) tmp27 = tl.where(tmp20, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp33 = tmp2 + tmp32 tmp34 = tmp33 * tmp6 tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tmp42 = tmp39 + tmp41 tmp43 = tmp42 * tmp6 tmp44 = tl.where(tmp38, tmp42, tmp43) tmp45 = tl.where(tmp0, tmp44, tmp9) tmp49 = tmp39 + tmp48 tmp50 = tmp49 * tmp6 tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tl.where(tmp11, tmp51, tmp9) tmp53 = triton_helpers.maximum(tmp45, tmp52) tmp57 = tmp39 + tmp56 tmp58 = tmp57 * tmp6 tmp59 = tl.where(tmp54, tmp57, tmp58) tmp60 = tl.where(tmp20, tmp59, tmp9) tmp61 = triton_helpers.maximum(tmp53, tmp60) tmp65 = tmp39 + tmp64 tmp66 = tmp65 * tmp6 tmp67 = tl.where(tmp62, tmp65, tmp66) tmp68 = tl.where(tmp29, tmp67, tmp9) tmp69 = triton_helpers.maximum(tmp61, tmp68) tmp74 = tmp71 + tmp73 tmp75 = tmp74 * tmp6 tmp76 = tl.where(tmp70, tmp74, tmp75) tmp77 = tl.where(tmp0, tmp76, tmp9) tmp81 = tmp71 + tmp80 tmp82 = tmp81 * tmp6 tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tl.where(tmp11, tmp83, tmp9) tmp85 = triton_helpers.maximum(tmp77, tmp84) tmp89 = tmp71 + tmp88 tmp90 = tmp89 * tmp6 tmp91 = tl.where(tmp86, tmp89, tmp90) tmp92 = tl.where(tmp20, tmp91, tmp9) tmp93 = triton_helpers.maximum(tmp85, tmp92) tmp97 = tmp71 + tmp96 tmp98 = tmp97 * tmp6 tmp99 = tl.where(tmp94, tmp97, tmp98) tmp100 = tl.where(tmp29, tmp99, tmp9) tmp101 = triton_helpers.maximum(tmp93, tmp100) tmp106 = tmp103 + tmp105 tmp107 = tmp106 * tmp6 tmp108 = tl.where(tmp102, tmp106, tmp107) tmp109 = tl.where(tmp0, tmp108, tmp9) tmp113 = tmp103 + tmp112 tmp114 = tmp113 * tmp6 tmp115 = tl.where(tmp110, tmp113, tmp114) tmp116 = tl.where(tmp11, tmp115, tmp9) tmp117 = triton_helpers.maximum(tmp109, tmp116) tmp121 = tmp103 + tmp120 tmp122 = tmp121 * tmp6 tmp123 = tl.where(tmp118, 
tmp121, tmp122) tmp124 = tl.where(tmp20, tmp123, tmp9) tmp125 = triton_helpers.maximum(tmp117, tmp124) tmp129 = tmp103 + tmp128 tmp130 = tmp129 * tmp6 tmp131 = tl.where(tmp126, tmp129, tmp130) tmp132 = tl.where(tmp29, tmp131, tmp9) tmp133 = triton_helpers.maximum(tmp125, tmp132) tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp69, xmask) tl.store(out_ptr2 + x0, tmp101, xmask) tl.store(out_ptr3 + x0, tmp133, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x2, xmask).to(tl.int1) tmp14 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr9 + x2, xmask).to(tl.int1) tmp24 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr11 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr13 + x2, xmask).to(tl.int1) tmp34 = tl.load(in_ptr14 + x1, xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr15 + x0, xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr16 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tmp16 * tmp5 tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tl.where(tmp0, tmp18, tmp8) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp26 = tmp24 + tmp25 tmp27 = tmp26 * tmp5 tmp28 = tl.where(tmp23, tmp26, tmp27) tmp29 = tl.where(tmp0, tmp28, tmp8) tmp31 = tmp29 - tmp30 tmp32 = tl_math.exp(tmp31) tmp36 = tmp34 + tmp35 tmp37 = tmp36 * tmp5 tmp38 = tl.where(tmp33, tmp36, tmp37) tmp39 = tl.where(tmp0, tmp38, tmp8) tmp41 = tmp39 - tmp40 tmp42 = tl_math.exp(tmp41) tl.store(out_ptr0 + x2, tmp12, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) tl.store(out_ptr2 + x2, tmp32, xmask) tl.store(out_ptr3 + x2, tmp42, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, 
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = 0.0
    tmp7 = tmp5 > tmp6
    tmp8 = 1.0
    tmp9 = tmp5 * tmp8
    tmp10 = libdevice.expm1(tmp9)
    tmp11 = tmp10 * tmp8
    tmp12 = tl.where(tmp7, tmp9, tmp11)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp4, tmp12, tmp13)
    tmp15 = tmp0 >= tmp3
    tmp16 = tl.full([1], 8, tl.int64)
    tmp17 = tmp0 < tmp16
    tmp18 = tmp15 & tmp17
    tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tmp19 > tmp6
    tmp21 = tmp19 * tmp8
    tmp22 = libdevice.expm1(tmp21)
    tmp23 = tmp22 * tmp8
    tmp24 = tl.where(tmp20, tmp21, tmp23)
    tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
    tmp26 = tl.where(tmp18, tmp24, tmp25)
    tmp27 = tmp0 >= tmp16
    tmp28 = tl.full([1], 12, tl.int64)
    tmp29 = tmp0 < tmp28
    tmp30 = tmp27 & tmp29
    tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp32 = tmp31 > tmp6
    tmp33 = tmp31 * tmp8
    tmp34 = libdevice.expm1(tmp33)
    tmp35 = tmp34 * tmp8
    tmp36 = tl.where(tmp32, tmp33, tmp35)
    tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
    tmp38 = tl.where(tmp30, tmp36, tmp37)
    tmp39 = tmp0 >= tmp28
    tl.full([1], 16, tl.int64)
    tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp43 = tmp42 > tmp6
    tmp44 = tmp42 * tmp8
    tmp45 = libdevice.expm1(tmp44)
    tmp46 = tmp45 * tmp8
    tmp47 = tl.where(tmp43, tmp44, tmp46)
    tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
    tmp49 = tl.where(tmp39, tmp47, tmp48)
    tmp50 = tl.where(tmp30, tmp38, tmp49)
    tmp51 = tl.where(tmp18, tmp26, tmp50)
    tmp52 = tl.where(tmp4, tmp14, tmp51)
    tl.store(out_ptr0 + x2, tmp52, xmask)


@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_6(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(
        tl.int1)
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(
        tl.int1)
    tmp2 = tl.load(in_ptr2 + x0, xmask)
    tmp3 = tl.load(in_ptr3 + 0)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
    tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy=
        'evict_last').to(tl.int1)
    tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy=
        'evict_last').to(tl.int1)
    tmp13 = tl.load(in_ptr3 + 1)
    tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy=
        'evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy=
        'evict_last').to(tl.int1)
    tmp22 = tl.load(in_ptr3 + 2)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy=
        'evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy=
        'evict_last').to(tl.int1)
    tmp31 = tl.load(in_ptr3 + 3)
    tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
    tmp5 = tmp2 + tmp4
    tmp6 = 4.0
    tmp7 = tmp5 * tmp6
    tmp8 = tl.where(tmp1, tmp5, tmp7)
    tmp9 = -8999999815811072.0
    tmp10 = tl.where(tmp0, tmp8, tmp9)
    tmp15 = tmp2 + tmp14
    tmp16 = tmp15 * tmp6
    tmp17 = tl.where(tmp12, tmp15, tmp16)
    tmp18 = tl.where(tmp11, tmp17, tmp9)
    tmp19 = triton_helpers.maximum(tmp10, tmp18)
    tmp24 = tmp2 + tmp23
    tmp25 = tmp24 * tmp6
    tmp26 = tl.where(tmp21, tmp24, tmp25)
    tmp27 = tl.where(tmp20, tmp26, tmp9)
    tmp28 = triton_helpers.maximum(tmp19, tmp27)
    tmp33 = tmp2 + tmp32
    tmp34 = tmp33 * tmp6
    tmp35 = tl.where(tmp30, tmp33, tmp34)
    tmp36 = tl.where(tmp29, tmp35, tmp9)
    tmp37 = triton_helpers.maximum(tmp28, tmp36)
    tl.store(out_ptr0 + x0, tmp37, xmask)


@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_7(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
    tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
    tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp5 = 4.0
    tmp6 = tmp4 * tmp5
    tmp7 = tl.where(tmp1, tmp4, tmp6)
    tmp8 = -8999999815811072.0
    tmp9 = tl.where(tmp0, tmp7, tmp8)
    tmp11 = tmp9 - tmp10
    tmp12 = tl_math.exp(tmp11)
    tl.store(out_ptr0 + x2, tmp12, xmask)


@triton.jit
def triton_poi_fused__log_softmax_elu_8(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tmp9 = tmp8 > tmp1
    tmp10 = tmp8 * tmp3
    tmp11 = libdevice.expm1(tmp10)
    tmp12 = tmp11 * tmp3
    tmp13 = tl.where(tmp9, tmp10, tmp12)
    tmp15 = tmp14 > tmp1
    tmp16 = tmp14 * tmp3
    tmp17 = libdevice.expm1(tmp16)
    tmp18 = tmp17 * tmp3
    tmp19 = tl.where(tmp15, tmp16, tmp18)
    tmp20 = triton_helpers.maximum(tmp13, tmp19)
    tmp22 = tmp21 > tmp1
    tmp23 = tmp21 * tmp3
    tmp24 = libdevice.expm1(tmp23)
    tmp25 = tmp24 * tmp3
    tmp26 = tl.where(tmp22, tmp23, tmp25)
    tmp27 = triton_helpers.maximum(tmp20, tmp26)
    tmp29 = tmp28 > tmp1
    tmp30 = tmp28 * tmp3
    tmp31 = libdevice.expm1(tmp30)
    tmp32 = tmp31 * tmp3
    tmp33 = tl.where(tmp29, tmp30, tmp32)
    tmp34 = triton_helpers.maximum(tmp27, tmp33)
    tmp35 = tmp7 - tmp34
    tl.store(out_ptr0 + x2, tmp35, xmask)


@triton.jit
def triton_poi_fused__log_softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (8, 1), (1, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (8, 1), (1, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (8, 1), (1, 1))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (8, 1), (1, 1))
    assert_size_stride(primals_11, (16, 4), (4, 1))
    assert_size_stride(primals_12, (8, 1), (1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 1),
            (1, 1), 0), out=buf1)
        buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 1),
            (1, 1), 4), out=buf2)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, buf2, buf3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_gt_1[grid(16)](primals_4, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_4
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_5, out=buf9)
        del primals_5
        buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (4, 1),
            (1, 1), 0), out=buf10)
        buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (4, 1),
            (1, 1), 4), out=buf11)
        buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_add_leaky_relu_0[grid(16)](buf10, buf11, buf12, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_7, out=buf17)
        del primals_7
        buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf17, reinterpret_tensor(primals_8, (4, 1),
            (1, 1), 0), out=buf18)
        buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf17, reinterpret_tensor(primals_8, (4, 1),
            (1, 1), 4), out=buf19)
        buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_add_leaky_relu_0[grid(16)](buf18, buf19, buf20, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_9, out=buf25)
        del primals_9
        buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf25, reinterpret_tensor(primals_10, (4, 1),
            (1, 1), 0), out=buf26)
        buf27 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf25, reinterpret_tensor(primals_10, (4, 1),
            (1, 1), 4), out=buf27)
        buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_add_leaky_relu_0[grid(16)](buf26, buf27, buf28, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused__softmax_add_leaky_relu_mul_where_2[grid(4)](buf4,
            buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19,
            buf28, buf26, buf27, buf5, buf13, buf21, buf29, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_add_leaky_relu_mul_where_3[grid(16)](buf4,
            buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20,
            buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14,
            buf22, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf1
        del buf10
        del buf11
        del buf13
        del buf18
        del buf19
        del buf2
        del buf21
        del buf26
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_4[grid(16)](buf6, buf7, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf8 = buf6
        del buf6
        extern_kernels.mm(buf7, buf0, out=buf8)
        buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_4[grid(16)](buf14, buf15, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf16 = buf14
        del buf14
        extern_kernels.mm(buf15, buf9, out=buf16)
        buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_4[grid(16)](buf22, buf23, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf24 = buf22
        del buf22
        extern_kernels.mm(buf23, buf17, out=buf24)
        buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_4[grid(16)](buf30, buf31, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf32 = buf30
        del buf30
        extern_kernels.mm(buf31, buf25, out=buf32)
        buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        triton_poi_fused_cat_5[grid(64)](buf8, buf16, buf24, buf32, buf33,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf33, primals_11, out=buf34)
        buf35 = reinterpret_tensor(buf5, (4, 1), (1, 1), 0)
        del buf5
        extern_kernels.mm(buf34, reinterpret_tensor(primals_12, (4, 1),
            (1, 1), 0), out=buf35)
        buf36 = reinterpret_tensor(buf29, (4, 1), (1, 1), 0)
        del buf29
        extern_kernels.mm(buf34, reinterpret_tensor(primals_12, (4, 1),
            (1, 1), 4), out=buf36)
        buf37 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_add_leaky_relu_0[grid(16)](buf35, buf36, buf37, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf38 = reinterpret_tensor(buf27, (4, 1), (1, 4), 0)
        del buf27
        triton_poi_fused__softmax_add_leaky_relu_mul_where_6[grid(4)](buf4,
            buf37, buf35, buf36, buf38, 4, XBLOCK=4, num_warps=1,
            num_stages=1)
        buf39 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_add_leaky_relu_mul_where_7[grid(16)](buf4,
            buf37, buf35, buf36, buf38, buf39, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del buf35
        del buf36
        del buf38
        buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_4[grid(16)](buf39, buf40, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf41 = buf39
        del buf39
        extern_kernels.mm(buf40, buf34, out=buf41)
        buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__log_softmax_elu_8[grid(16)](buf41, buf42, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__log_softmax_9[grid(16)](buf42, buf43, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf42
    return (buf43, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20,
        buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41, buf43,
        reinterpret_tensor(buf34, (4, 4), (1, 4), 0), reinterpret_tensor(
        primals_12, (1, 4), (1, 1), 4), reinterpret_tensor(primals_12, (1,
        4), (1, 1), 0), reinterpret_tensor(buf33, (16, 4), (1, 16), 0),
        reinterpret_tensor(primals_11, (4, 16), (1, 4), 0),
        reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(
        primals_10, (1, 4), (1, 1), 4), reinterpret_tensor(primals_10, (1,
        4), (1, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
        reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor(
        primals_8, (1, 4), (1, 1), 4), reinterpret_tensor(primals_8, (1, 4),
        (1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_6, (1, 4), (1, 1), 4),
        reinterpret_tensor(primals_6, (1, 4), (1, 1), 0),
        reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(
        primals_3, (1, 4), (1, 1), 4), reinterpret_tensor(primals_3, (1, 4),
        (1, 1), 0))


class GATLayer(nn.Module):

    def __init__(self, input_feature, output_feature, dropout, alpha,
        concat=True):
        super(GATLayer, self).__init__()
        self.input_feature = input_feature
        self.output_feature = output_feature
        self.alpha = alpha
        self.dropout = dropout
        self.concat = concat
        self.a = nn.Parameter(torch.empty(size=(2 * output_feature, 1)))
        self.w = nn.Parameter(torch.empty(size=(input_feature,
            output_feature)))
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.w.data, gain=1.414)
        nn.init.xavier_uniform_(self.a.data, gain=1.414)

    def forward(self, h, adj):
        Wh = torch.mm(h, self.w)
        e = self._prepare_attentional_mechanism_input(Wh)
        zero_vec = -9000000000000000.0 * torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.mm(attention, Wh)
        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime

    def _prepare_attentional_mechanism_input(self, Wh):
        Wh1 = torch.matmul(Wh, self.a[:self.output_feature, :])
        Wh2 = torch.matmul(Wh, self.a[self.output_feature:, :])
        e = Wh1 + Wh2.T
        return self.leakyrelu(e)


class GATNew(nn.Module):

    def __init__(self, input_size, hidden_size, output_size, dropout,
        alpha, nheads, concat=True):
        super(GATNew, self).__init__()
        self.dropout = dropout
        self.attention = [GATLayer(input_size, hidden_size, dropout=
            dropout, alpha=alpha, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attention):
            self.add_module('attention_{}'.format(i), attention)
        self.out_att = GATLayer(hidden_size * nheads, output_size, dropout=
            dropout, alpha=alpha, concat=False)

    def forward(self, input_0, input_1):
        primals_3 = self.attention_0.a
        primals_1 = self.attention_0.w
        primals_6 = self.attention_1.a
        primals_2 = self.attention_1.w
        primals_8 = self.attention_2.a
        primals_4 = self.attention_2.w
        primals_10 = self.attention_3.a
        primals_5 = self.attention_3.w
        primals_12 = self.out_att.a
        primals_11 = self.out_att.w
        primals_7 = input_0
        primals_9 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12])
        return output[0]
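A minimal smoke test of the compiled GATNew wrapper above could look like the sketch below; it is not part of the original record. The (4, 4) tensor sizes mirror the shapes asserted in call(), nheads=4 matches the hard-coded attention_0..attention_3 lookups in forward(), and the remaining hyperparameters (dropout, alpha) are illustrative assumptions.

# Hypothetical usage sketch; assumes a CUDA device and the classes defined above.
import torch

model = GATNew(input_size=4, hidden_size=4, output_size=4, dropout=0.5,
    alpha=0.2, nheads=4).cuda()
h = torch.rand(4, 4, device='cuda')    # toy node features, matches the (4, 4) asserts
adj = torch.rand(4, 4, device='cuda')  # toy dense adjacency, same shape
out = model(h, adj)                    # buf43 from call(); expected shape (4, 4)
print(out.shape)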
OuYangg/GNNs
GAT
false
9,518
[ "Apache-2.0" ]
0
ef5b1944490507684d603de3ae0b2aa7b5168f47
https://github.com/OuYangg/GNNs/tree/ef5b1944490507684d603de3ae0b2aa7b5168f47
SEBlock
import torch
import torch.nn as nn
import torch.nn.functional as F


class SEBlock(nn.Module):

    def __init__(self, input_channels, internal_neurons):
        super(SEBlock, self).__init__()
        self.down = nn.Conv2d(in_channels=input_channels, out_channels=
            internal_neurons, kernel_size=1, stride=1, bias=True)
        self.up = nn.Conv2d(in_channels=internal_neurons, out_channels=
            input_channels, kernel_size=1, stride=1, bias=True)

    def forward(self, inputs):
        x = F.avg_pool2d(inputs, kernel_size=inputs.size(3))
        x = self.down(x)
        x = F.relu(x)
        x = self.up(x)
        x = F.sigmoid(x)
        x = x.repeat(1, 1, inputs.size(2), inputs.size(3))
        return inputs * x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channels': 4, 'internal_neurons': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp18 = tmp17 + tmp16
    tmp20 = tmp19 + tmp18
    tmp22 = tmp21 + tmp20
    tmp24 = tmp23 + tmp22
    tmp26 = tmp25 + tmp24
    tmp28 = tmp27 + tmp26
    tmp30 = tmp29 + tmp28
    tmp31 = 0.0625
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + x0, tmp32, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


@triton.jit
def triton_poi_fused_mul_repeat_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_relu_1[grid(16)](buf2, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_sigmoid_2[grid(16)](buf4, primals_5,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_repeat_3[grid(256)](primals_1, buf4, buf5,
            256, XBLOCK=256, num_warps=4, num_stages=1)
    return buf5, primals_1, primals_2, primals_4, buf0, buf2, buf4


class SEBlockNew(nn.Module):

    def __init__(self, input_channels, internal_neurons):
        super(SEBlockNew, self).__init__()
        self.down = nn.Conv2d(in_channels=input_channels, out_channels=
            internal_neurons, kernel_size=1, stride=1, bias=True)
        self.up = nn.Conv2d(in_channels=internal_neurons, out_channels=
            input_channels, kernel_size=1, stride=1, bias=True)

    def forward(self, input_0):
        primals_2 = self.down.weight
        primals_3 = self.down.bias
        primals_4 = self.up.weight
        primals_5 = self.up.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
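For reference, a quick consistency check between the eager SEBlock and the compiled SEBlockNew wrapper might look like the sketch below; it is not part of the original record and assumes a CUDA device, both classes from this record in scope, and an arbitrarily chosen tolerance. The module names (down, up) are identical in both classes, so the state dicts can be shared directly.

# Hypothetical consistency check; toy shapes follow get_inputs(), tolerance is an assumption.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')            # same shape as get_inputs()
ref = SEBlock(input_channels=4, internal_neurons=4).cuda()
opt = SEBlockNew(input_channels=4, internal_neurons=4).cuda()
opt.load_state_dict(ref.state_dict())                 # share the conv weights/biases
with torch.no_grad():
    print(torch.allclose(ref(x), opt(x), atol=1e-6))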
Sharingsky/resrep
SEBlock
false
9,520
[ "MIT" ]
0
a173d1bc256b75b2c902024929e406863ce48b9b
https://github.com/Sharingsky/resrep/tree/a173d1bc256b75b2c902024929e406863ce48b9b