Dataset columns (name, dtype, observed range):

  entry_point                   string   lengths 1 to 65
  original_triton_python_code   string   lengths 208 to 619k
  optimised_triton_code         string   lengths 1.15k to 275k
  repo_name                     string   lengths 7 to 115
  module_name                   string   lengths 1 to 65
  synthetic                     bool     1 class
  uuid                          int64    values 0 to 18.5k
  licenses                      list     lengths 1 to 6
  stars                         int64    values 0 to 19.8k
  sha                           string   length 40
  repo_link                     string   lengths 72 to 180
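Each record below pairs a reference PyTorch module (original_triton_python_code) with the Inductor-generated Triton implementation of the same module (optimised_triton_code), followed by provenance metadata: source repository, module name, synthetic flag, uuid, licenses, stars, commit sha, and repo link. As a minimal sketch of how one record is shaped, assuming rows are viewed as plain Python dicts, the example below uses values from the first record; the two code strings are truncated here and in the actual data hold the full source reproduced in the rows that follow.

```python
# Illustrative only: field names come from the schema above, values from the
# first record below; the two code fields are truncated for readability.
sample_row = {
    "entry_point": "CriterionCWD",
    "original_triton_python_code": "import torch\nimport torch.nn as nn\n...",        # reference PyTorch module + get_inputs()
    "optimised_triton_code": "import torch\nimport triton\nimport triton.language as tl\n...",  # generated @triton.jit kernels + call()
    "repo_name": "yubin1219/Semantic-Seg",
    "module_name": "CriterionCWD",
    "synthetic": False,
    "uuid": 4642,
    "licenses": ["BSD-2-Clause"],
    "stars": 0,
    "sha": "c40bd43d3d7e44bc995b8d041736580dec084251",
    "repo_link": "https://github.com/yubin1219/Semantic-Seg/tree/c40bd43d3d7e44bc995b8d041736580dec084251",
}
```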
CriterionCWD
import torch import torch.nn as nn import torch._utils import torch.optim class ChannelNorm(nn.Module): def __init__(self): super(ChannelNorm, self).__init__() def forward(self, featmap): n, c, _h, _w = featmap.shape featmap = featmap.reshape((n, c, -1)) featmap = featmap.softmax(dim=-1) return featmap class CriterionCWD(nn.Module): def __init__(self, norm_type='channel', divergence='kl', temperature=4.0): super(CriterionCWD, self).__init__() if norm_type == 'channel': self.normalize = ChannelNorm() elif norm_type == 'spatial': self.normalize = nn.Softmax(dim=1) elif norm_type == 'channel_mean': self.normalize = lambda x: x.view(x.size(0), x.size(1), -1).mean(-1 ) else: self.normalize = None self.norm_type = norm_type self.temperature = 1.0 if divergence == 'mse': self.criterion = nn.MSELoss(reduction='sum') elif divergence == 'kl': self.criterion = nn.KLDivLoss(reduction='sum') self.temperature = temperature self.divergence = divergence def forward(self, preds_S, preds_T): n, c, h, w = preds_S.shape if self.normalize is not None: norm_s = self.normalize(preds_S / self.temperature) norm_t = self.normalize(preds_T.detach() / self.temperature) else: norm_s = preds_S norm_t = preds_T.detach() if self.divergence == 'kl': norm_s = norm_s.log() loss = self.criterion(norm_s, norm_t) if self.norm_type == 'channel' or self.norm_type == 'channel_mean': loss /= n * c else: loss /= n * h * w return loss * self.temperature ** 2 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch._utils import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp12, xmask) @triton.jit def triton_per_fused__softmax_div_log_mul_sub_sum_xlogy_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr3 + r2, None) tmp18 = tl.load(in_ptr4 + r1, None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr5 + r1, None, eviction_policy='evict_last') tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = libdevice.isnan(tmp7).to(tl.int1) tmp9 = 0.0 tmp10 = tmp7 == tmp9 tmp11 = tl_math.log(tmp7) tmp12 = tmp7 * tmp11 tmp13 = tl.where(tmp10, tmp9, tmp12) tmp14 = float('nan') tmp15 = tl.where(tmp8, tmp14, tmp13) tmp17 = tmp16 * tmp1 tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 / tmp21 tmp23 = tl_math.log(tmp22) tmp24 = tmp7 * tmp23 tmp25 = tmp15 - tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = 0.0625 tmp30 = tmp28 * tmp29 tmp31 = 16.0 tmp32 = tmp30 * tmp31 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp32, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(16)](arg1_1, buf0, buf1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 
triton_per_fused__softmax_div_log_mul_sub_sum_xlogy_1[grid(1)](buf5, arg1_1, buf0, buf1, arg0_1, buf2, buf3, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf0 del buf1 del buf2 del buf3 return buf5, class ChannelNorm(nn.Module): def __init__(self): super(ChannelNorm, self).__init__() def forward(self, featmap): n, c, _h, _w = featmap.shape featmap = featmap.reshape((n, c, -1)) featmap = featmap.softmax(dim=-1) return featmap class CriterionCWDNew(nn.Module): def __init__(self, norm_type='channel', divergence='kl', temperature=4.0): super(CriterionCWDNew, self).__init__() if norm_type == 'channel': self.normalize = ChannelNorm() elif norm_type == 'spatial': self.normalize = nn.Softmax(dim=1) elif norm_type == 'channel_mean': self.normalize = lambda x: x.view(x.size(0), x.size(1), -1).mean(-1 ) else: self.normalize = None self.norm_type = norm_type self.temperature = 1.0 if divergence == 'mse': self.criterion = nn.MSELoss(reduction='sum') elif divergence == 'kl': self.criterion = nn.KLDivLoss(reduction='sum') self.temperature = temperature self.divergence = divergence def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
yubin1219/Semantic-Seg
CriterionCWD
false
4,642
[ "BSD-2-Clause" ]
0
c40bd43d3d7e44bc995b8d041736580dec084251
https://github.com/yubin1219/Semantic-Seg/tree/c40bd43d3d7e44bc995b8d041736580dec084251
SelfAttentionGated
import torch import torch.utils.data import torch.nn.functional as F def masked_softmax(x, m=None, dim=-1): """ Softmax with mask :param x: :param m: :param dim: :return: """ if m is not None: m = m.float() x = x * m e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0]) if m is not None: e_x = e_x * m softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06) return softmax class SelfAttentionGated(torch.nn.Module): """ Self-Attention Gated layer, it`s not weighted sum in the last, but just weighted math: \\softmax(W* anh(W*x)) * x Args: input_size: The number of expected features in the input x Inputs: input, mask - **input** (seq_len, batch, input_size): tensor containing the features of the input sequence. - **mask** (batch, seq_len): tensor show whether a padding index for each element in the batch. Outputs: output - **output** (seq_len, batch, input_size): gated output tensor """ def __init__(self, input_size): super(SelfAttentionGated, self).__init__() self.linear_g = torch.nn.Linear(input_size, input_size) self.linear_t = torch.nn.Linear(input_size, 1) def forward(self, x, x_mask): g_tanh = F.tanh(self.linear_g(x)) gt = self.linear_t.forward(g_tanh).squeeze(2).transpose(0, 1) gt_prop = masked_softmax(gt, x_mask, dim=1) gt_prop = gt_prop.transpose(0, 1).unsqueeze(2) x_gt = x * gt_prop return x_gt def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x2 = xindex // 16 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + 64 * x2), xmask) tmp3 = tl.load(in_ptr0 + (16 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x4 + 64 * x2), xmask) tmp7 = tl.load(in_ptr0 + (32 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x4 + 64 * x2), xmask) tmp11 = tl.load(in_ptr0 + (48 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x4 + 64 * x2), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp16 * tmp1 tmp18 = tmp5 - tmp14 tmp19 = tl_math.exp(tmp18) tmp20 = tmp19 * tmp4 tmp21 = tmp17 + tmp20 tmp22 = tmp9 - tmp14 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 * tmp8 tmp25 = tmp21 + tmp24 tmp26 = tmp13 - tmp14 tmp27 = tl_math.exp(tmp26) tmp28 = tmp27 * tmp12 tmp29 = tmp25 + tmp28 tl.store(out_ptr0 + x5, tmp14, xmask) tl.store(out_ptr1 + x5, tmp29, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex % 256 x1 = xindex // 4 % 4 x6 = xindex // 64 x3 = xindex // 64 % 4 x4 = xindex // 256 x7 = xindex % 16 x8 = xindex tmp0 = tl.load(in_ptr0 + x5, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 4 * x6), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr2 + (x7 + 16 * x4 + 64 * x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x7 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr4 + (x7 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 * tmp2 tmp9 = 1e-06 tmp10 = tmp8 + tmp9 tmp11 = tmp7 / tmp10 tmp12 = tmp0 * tmp11 tl.store(out_ptr0 + x8, tmp12, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) 
assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused_exp_max_mul_sub_sum_1[grid(64)](buf3, primals_6, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(1024)](primals_3, buf3, primals_6, buf4, buf5, buf6, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del buf5 return buf6, primals_3, primals_6, buf1, buf3, primals_4 def masked_softmax(x, m=None, dim=-1): """ Softmax with mask :param x: :param m: :param dim: :return: """ if m is not None: m = m.float() x = x * m e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0]) if m is not None: e_x = e_x * m softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06) return softmax class SelfAttentionGatedNew(torch.nn.Module): """ Self-Attention Gated layer, it`s not weighted sum in the last, but just weighted math: \\softmax(W* anh(W*x)) * x Args: input_size: The number of expected features in the input x Inputs: input, mask - **input** (seq_len, batch, input_size): tensor containing the features of the input sequence. - **mask** (batch, seq_len): tensor show whether a padding index for each element in the batch. Outputs: output - **output** (seq_len, batch, input_size): gated output tensor """ def __init__(self, input_size): super(SelfAttentionGatedNew, self).__init__() self.linear_g = torch.nn.Linear(input_size, input_size) self.linear_t = torch.nn.Linear(input_size, 1) def forward(self, input_0, input_1): primals_1 = self.linear_g.weight primals_2 = self.linear_g.bias primals_4 = self.linear_t.weight primals_5 = self.linear_t.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
xdong73S/Match_LSTM_v2.0
SelfAttentionGated
false
4,643
[ "MIT" ]
0
dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729
https://github.com/xdong73S/Match_LSTM_v2.0/tree/dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729
Normalize
import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, features, epsilon=1e-06): super(Normalize, self).__init__() self.gain = nn.Parameter(torch.ones(features)) self.bias = nn.Parameter(torch.zeros(features)) self.epsilon = epsilon def forward(self, x, dim=-1): mu = x.mean(dim, keepdim=True) sigma = torch.sqrt(x.var(dim, keepdim=True) + self.epsilon) gain = self.gain bias = self.bias if dim != -1: shape = [1] * len(mu.size()) shape[dim] = self.gain.size()[0] gain = gain.view(shape) bias = bias.view(shape) return gain * (x - mu) / (sigma + self.epsilon) + bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = 1e-06 tmp27 = tmp25 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = tmp28 + tmp26 tmp30 = tmp12 / tmp29 tmp32 = tmp30 + tmp31 tl.store(in_out_ptr0 + x2, tmp32, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](buf1, primals_2, primals_1, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf1, primals_1 class NormalizeNew(nn.Module): def __init__(self, features, epsilon=1e-06): super(NormalizeNew, self).__init__() self.gain = nn.Parameter(torch.ones(features)) self.bias = nn.Parameter(torch.zeros(features)) self.epsilon = epsilon def forward(self, input_0): primals_2 = self.gain primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
yuri20198/neurips19-graph-protein-design
Normalize
false
4,644
[ "MIT" ]
0
068e8cdfcbba629f996e99d3765cc2f3233f71a3
https://github.com/yuri20198/neurips19-graph-protein-design/tree/068e8cdfcbba629f996e99d3765cc2f3233f71a3
PixelWiseBias
import torch import torch.nn as nn class PixelWiseBias(nn.Module): """Some Information about PixelWiseBias""" def __init__(self, channels): super(PixelWiseBias, self).__init__() self.channels = channels self.bias = nn.Parameter(torch.zeros(channels)) def forward(self, x): x = x + self.bias[None, :, None, None] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class PixelWiseBiasNew(nn.Module): """Some Information about PixelWiseBias""" def __init__(self, channels): super(PixelWiseBiasNew, self).__init__() self.channels = channels self.bias = nn.Parameter(torch.zeros(channels)) def forward(self, input_0): primals_1 = self.bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
uthree/gan-image-generator2
PixelWiseBias
false
4,645
[ "MIT" ]
0
63a9f458f1f78fe13311157a219a5637a59afee4
https://github.com/uthree/gan-image-generator2/tree/63a9f458f1f78fe13311157a219a5637a59afee4
CausalConv1d
import torch from torch import nn class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4), (24, 6, 1), 0 ), primals_1, primals_3 class CausalConv1dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1dNew, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, input_0): primals_1 = self.causal_conv.weight primals_2 = self.causal_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
yuwl798180/FewRel
CausalConv1d
false
4,646
[ "MIT" ]
0
8126e440b5d5d178e221cfb4a97a69cabd771fa4
https://github.com/yuwl798180/FewRel/tree/8126e440b5d5d178e221cfb4a97a69cabd771fa4
DenseBlock
import torch from torch import nn from torch.nn import functional as F class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] class DenseBlock(nn.Module): def __init__(self, in_channels, filters, dilation=2): super(DenseBlock, self).__init__() self.causal_conv1 = CausalConv1d(in_channels, filters, dilation= dilation) self.causal_conv2 = CausalConv1d(in_channels, filters, dilation= dilation) def forward(self, minibatch): tanh = F.tanh(self.causal_conv1(minibatch)) sig = F.sigmoid(self.causal_conv2(minibatch)) out = torch.cat([minibatch, tanh * sig], dim=1) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'filters': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 6 * (-4 + x1) + 24 * x2), tmp6 & xmask, other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = tl.load(in_ptr2 + (x0 + 6 * (-4 + x1) + 24 * x2), tmp6 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 6), (24, 6, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(96)](buf3, primals_5, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(128)](primals_3, buf1, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) return buf4, primals_1, primals_3, primals_4, buf1, buf3 class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] class DenseBlockNew(nn.Module): def __init__(self, in_channels, filters, dilation=2): super(DenseBlockNew, 
self).__init__() self.causal_conv1 = CausalConv1d(in_channels, filters, dilation= dilation) self.causal_conv2 = CausalConv1d(in_channels, filters, dilation= dilation) def forward(self, input_0): primals_1 = self.causal_conv1.causal_conv.weight primals_2 = self.causal_conv1.causal_conv.bias primals_4 = self.causal_conv2.causal_conv.weight primals_5 = self.causal_conv2.causal_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
yuwl798180/FewRel
DenseBlock
false
4,647
[ "MIT" ]
0
8126e440b5d5d178e221cfb4a97a69cabd771fa4
https://github.com/yuwl798180/FewRel/tree/8126e440b5d5d178e221cfb4a97a69cabd771fa4
UnStackDelta
import torch import torch.nn as nn class UnStackDelta(nn.Module): """Reverse of StackDelta""" def __init__(self): super().__init__() def forward(self, x: 'torch.Tensor'): assert x.dim() == 4 if x.requires_grad: out = x.transpose(1, 2).contiguous() else: out = x.transpose_(1, 2).contiguous() out = out.view(out.size(0), out.size(1), out.size(2) * out.size(3)) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 4, 16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), class UnStackDeltaNew(nn.Module): """Reverse of StackDelta""" def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
wenjie-p/CAT
UnStackDelta
false
4,648
[ "Apache-2.0" ]
0
0e6904658dd3d14afe51faf1d0141ae95fef44e8
https://github.com/wenjie-p/CAT/tree/0e6904658dd3d14afe51faf1d0141ae95fef44e8
ToRGB
import torch import torch.nn as nn class ToRGB(nn.Module): """Some Information about ToRGB""" def __init__(self, input_channels): super(ToRGB, self).__init__() self.conv = nn.Conv2d(input_channels, 3, kernel_size=1, stride=1, padding=0) self.tanh = nn.Tanh() def forward(self, x): x = self.conv(x) x = self.tanh(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 4, 4), (48, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_tanh_0[grid(192)](buf1, primals_2, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf1 class ToRGBNew(nn.Module): """Some Information about ToRGB""" def __init__(self, input_channels): super(ToRGBNew, self).__init__() self.conv = nn.Conv2d(input_channels, 3, kernel_size=1, stride=1, padding=0) self.tanh = nn.Tanh() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
uthree/gan-image-generator2
ToRGB
false
4,649
[ "MIT" ]
0
63a9f458f1f78fe13311157a219a5637a59afee4
https://github.com/uthree/gan-image-generator2/tree/63a9f458f1f78fe13311157a219a5637a59afee4
MinibatchStdDev
import torch import torch.nn as nn class MinibatchStdDev(nn.Module): """ Minibatch standard deviation layer for the discriminator """ def __init__(self): """ derived class constructor """ super().__init__() def forward(self, x, alpha=1e-08): """ forward pass of the layer :param x: input activation volume :param alpha: small number for numerical stability :return: y => x appended with standard deviation constant map """ batch_size, _, height, width = x.shape y = x - x.mean(dim=0, keepdim=True) y = torch.sqrt(y.pow(2.0).mean(dim=0, keepdim=False) + alpha) y = y.mean().view(1, 1, 1, 1) y = y.repeat(batch_size, 1, height, width) y = torch.cat([x, y], 1) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp28, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class MinibatchStdDevNew(nn.Module): """ Minibatch standard deviation layer for the discriminator """ def __init__(self): """ derived class constructor """ super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zd-daniel/GANs-ZOO
MinibatchStdDev
false
4,650
[ "MIT" ]
0
fe72391e1db46616f97d1dec62441a299aa9c636
https://github.com/zd-daniel/GANs-ZOO/tree/fe72391e1db46616f97d1dec62441a299aa9c636
EncoderLayer
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class ScaledDotProductAttention(nn.Module): def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 1, -float('inf')) attn = F.softmax(attn, dim=-1) output = torch.matmul(attn, v) return output, attn class MultiHeadAttention(nn.Module): def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.layer_norm(q) q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc(q)) out = q + residual return out, attn class EncoderLayer(nn.Module): """ Compose with two layers """ def __init__(self, d_model, n_head, d_k, d_v, dropout=0.1): super(EncoderLayer, self).__init__() self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) def forward(self, enc_input, slf_attn_mask=None): enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input, enc_input, mask=slf_attn_mask) return enc_output, enc_slf_attn def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4) del primals_5 buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5) del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_div_2[grid(256)](buf3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__softmax_5[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = buf9 del buf9 triton_poi_fused_clone_6[grid(256)](buf5, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), out=buf12 ) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(256)](buf12, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf12 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 16), (16, 1), 0), reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0) del buf14 triton_poi_fused_add_7[grid(64)](buf15, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf15, buf10, primals_1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0 ), buf10, reinterpret_tensor(buf13, (16, 16), (16, 1), 0 ), primals_7, reinterpret_tensor(buf11, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_4 class ScaledDotProductAttention(nn.Module): def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 1, -float('inf')) attn = F.softmax(attn, dim=-1) output = torch.matmul(attn, v) return output, attn class MultiHeadAttention(nn.Module): def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) 
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.layer_norm(q) q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc(q)) out = q + residual return out, attn class EncoderLayerNew(nn.Module): """ Compose with two layers """ def __init__(self, d_model, n_head, d_k, d_v, dropout=0.1): super(EncoderLayerNew, self).__init__() self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout) def forward(self, input_0): primals_4 = self.slf_attn.w_qs.weight primals_5 = self.slf_attn.w_ks.weight primals_6 = self.slf_attn.w_vs.weight primals_7 = self.slf_attn.fc.weight primals_2 = self.slf_attn.layer_norm.weight primals_3 = self.slf_attn.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
yuanweining/DTI
EncoderLayer
false
4,651
[ "Apache-2.0" ]
0
11eacb46a221da04d0e9b01d41c89c7ce51ea302
https://github.com/yuanweining/DTI/tree/11eacb46a221da04d0e9b01d41c89c7ce51ea302
FFModule
import torch import torch.nn as nn class FFModule(nn.Module): """Feed-forward module default output dimension = idim x0 -> LayerNorm -> FC -> Swish -> Dropout -> FC -> Dropout -> x1 x0 + res_factor * x1 -> output """ def __init__(self, idim: 'int', res_factor: 'float'=0.5, dropout: 'float'=0.0) ->None: super().__init__() assert res_factor > 0.0 and dropout >= 0.0 self._res_factor = res_factor self.ln = nn.LayerNorm([idim]) self.fc0 = nn.Linear(idim, idim * 4) self.swish = nn.SiLU() self.dropout0 = nn.Dropout(dropout) self.fc1 = nn.Linear(idim * 4, idim) self.dropout1 = nn.Dropout(dropout) def forward(self, x: 'torch.Tensor'): output = self.ln(x) output = self.fc0(output) output = self.swish(output) output = self.dropout0(output) output = self.fc1(output) output = self.dropout1(output) output = x + self._res_factor * output return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'idim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_silu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) triton_poi_fused_silu_2[grid(1024)](buf3, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_mul_3[grid(256)](buf6, primals_3, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0 ), primals_6, primals_4 class FFModuleNew(nn.Module): """Feed-forward module default output dimension = idim x0 -> LayerNorm -> FC -> Swish -> Dropout -> FC -> Dropout -> x1 x0 + res_factor * x1 -> output """ def __init__(self, idim: 'int', res_factor: 'float'=0.5, dropout: 'float'=0.0) ->None: super().__init__() assert res_factor > 0.0 and dropout >= 0.0 self._res_factor = res_factor self.ln = nn.LayerNorm([idim]) self.fc0 = nn.Linear(idim, idim * 4) self.swish = nn.SiLU() self.dropout0 = nn.Dropout(dropout) self.fc1 = nn.Linear(idim * 4, idim) self.dropout1 = nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.ln.weight primals_2 = self.ln.bias primals_4 = self.fc0.weight primals_5 = self.fc0.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
wenjie-p/CAT
FFModule
false
4,652
[ "Apache-2.0" ]
0
0e6904658dd3d14afe51faf1d0141ae95fef44e8
https://github.com/wenjie-p/CAT/tree/0e6904658dd3d14afe51faf1d0141ae95fef44e8
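A minimal smoke-test sketch for the FFModule row above (not part of the dataset row). It assumes the FFModuleNew class and its call() graph have been loaded into a Python session, and that a CUDA device with the Triton / torch._inductor runtime is available, since the generated kernels allocate CUDA buffers directly.

import torch

torch.manual_seed(0)
m = FFModuleNew(idim=4).cuda()               # idim=4 matches the (4, 4, 4, 4) strides asserted in call()
x = torch.rand(4, 4, 4, 4, device="cuda")
with torch.no_grad():
    y = m(x)
print(y.shape)                                # expected: torch.Size([4, 4, 4, 4])
print(torch.isfinite(y).all().item())         # the 0.5-scaled residual x + 0.5 * FFN(LN(x)) should stay finite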
MultiHeadAttention
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class ScaledDotProductAttention(nn.Module): def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 1, -float('inf')) attn = F.softmax(attn, dim=-1) output = torch.matmul(attn, v) return output, attn class MultiHeadAttention(nn.Module): def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q q = self.layer_norm(q) q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) q, attn = self.attention(q, k, v, mask=mask) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.dropout(self.fc(q)) out = q + residual return out, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (4, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, buf1, 16, 
XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0, buf1, primals_4, primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_4 del primals_5 buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf4) del primals_7 buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_div_2[grid(256)](buf3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__softmax_5[grid(256)](buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) buf11 = buf9 del buf9 triton_poi_fused_clone_6[grid(256)](buf5, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), out=buf12 ) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(256)](buf12, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf12 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 16), (16, 1), 0), reinterpret_tensor(primals_9, (16, 4), (1, 16), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0) del buf14 triton_poi_fused_add_7[grid(64)](buf15, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf15, buf10, primals_1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf10, reinterpret_tensor(buf13, (16, 16), (16, 1), 0 ), primals_9, reinterpret_tensor(buf11, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_6 class ScaledDotProductAttention(nn.Module): def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 1, -float('inf')) attn = F.softmax(attn, dim=-1) output = torch.matmul(attn, v) return output, attn class MultiHeadAttentionNew(nn.Module): def __init__(self, n_head, d_model, 
d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def forward(self, input_0, input_1, input_2): primals_6 = self.w_qs.weight primals_7 = self.w_ks.weight primals_8 = self.w_vs.weight primals_9 = self.fc.weight primals_4 = self.layer_norm.weight primals_5 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
yuanweining/DTI
MultiHeadAttention
false
4,653
[ "Apache-2.0" ]
0
11eacb46a221da04d0e9b01d41c89c7ce51ea302
https://github.com/yuanweining/DTI/tree/11eacb46a221da04d0e9b01d41c89c7ce51ea302
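A hedged parity sketch for the MultiHeadAttention row: both classes above define the same parameters, so weights can be shared via load_state_dict. The generated graph applies neither dropout nor a mask, so the comparison assumes eval() mode and mask=None; CUDA is required for the compiled path.

import torch

torch.manual_seed(0)
eager = MultiHeadAttention(n_head=4, d_model=4, d_k=4, d_v=4).cuda().eval()
fused = MultiHeadAttentionNew(n_head=4, d_model=4, d_k=4, d_v=4).cuda().eval()
fused.load_state_dict(eager.state_dict())     # identical parameter layout

q = torch.rand(4, 4, 4, device="cuda")
k = torch.rand(4, 4, 4, device="cuda")
v = torch.rand(4, 4, 4, device="cuda")
with torch.no_grad():
    out_ref, attn_ref = eager(q, k, v, mask=None)
    out_new, attn_new = fused(q, k, v)
print((out_ref - out_new).abs().max().item())    # expected: fp32 round-off only
print((attn_ref - attn_new).abs().max().item())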
Lookahead
import torch import torch.nn as nn import torch.nn.functional as F class Lookahead(nn.Module): def __init__(self, n_features, context): super(Lookahead, self).__init__() assert context > 0 self.context = context self.n_features = n_features self.pad = 0, self.context - 1 self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size =self.context, stride=1, groups=self.n_features, padding=0, bias=None) def forward(self, x): x = x.transpose(1, 2) x = F.pad(x, pad=self.pad, value=0) x = self.conv(x) x = x.transpose(1, 2).contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_features': 4, 'context': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 7 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = x2 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp2 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x2 + 7 * y3), tmp3, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(16, 7)](primals_1, buf0, 16, 7, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=4, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf1, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf1 return buf2, primals_2, buf0 class LookaheadNew(nn.Module): def __init__(self, n_features, context): super(LookaheadNew, self).__init__() assert context > 0 self.context = context self.n_features = n_features self.pad = 0, self.context - 1 self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size =self.context, stride=1, groups=self.n_features, padding=0, bias=None) def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
wenjie-p/CAT
Lookahead
false
4,654
[ "Apache-2.0" ]
0
0e6904658dd3d14afe51faf1d0141ae95fef44e8
https://github.com/wenjie-p/CAT/tree/0e6904658dd3d14afe51faf1d0141ae95fef44e8
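A small equivalence sketch for the Lookahead row (same assumptions: both classes in scope, CUDA available). The compiled path fuses the asymmetric right-padding into a Triton kernel and keeps the grouped Conv1d as an extern convolution call, so the two outputs should agree.

import torch

torch.manual_seed(0)
eager = Lookahead(n_features=4, context=4).cuda()
fused = LookaheadNew(n_features=4, context=4).cuda()
fused.load_state_dict(eager.state_dict())     # only 'conv.weight'; the conv is created without a bias

x = torch.rand(4, 4, 4, device="cuda")         # (batch, time, features), as in get_inputs()
with torch.no_grad():
    diff = (eager(x) - fused(x)).abs().max()
print(diff.item())                              # expected: ~0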
PositionGenerator
import torch import torch.nn as nn class LayerNorm(nn.Module): def __init__(self, hidden_size, variance_epsilon=1e-12): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(hidden_size)) self.beta = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = variance_epsilon def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class PositionGenerator(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, d_model): super(PositionGenerator, self).__init__() self.norm = LayerNorm(d_model) self.proj = nn.Linear(d_model, 3) def forward(self, x, mask): mask = mask.unsqueeze(-1).float() out_masked = self.norm(x) * mask projected = self.proj(out_masked) return projected def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-12 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 256 x4 = xindex // 4 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x5, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (3, 4), (4, 1)) assert_size_stride(primals_6, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_2, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) 
triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](primals_3, buf0, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(1024)](buf1, primals_1, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((256, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 return reinterpret_tensor(buf3, (4, 4, 4, 4, 3), (192, 48, 12, 3, 1), 0 ), primals_1, primals_2, reinterpret_tensor(buf2, (256, 4), (4, 1), 0 ), primals_5 class LayerNorm(nn.Module): def __init__(self, hidden_size, variance_epsilon=1e-12): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(hidden_size)) self.beta = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = variance_epsilon def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class PositionGeneratorNew(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, d_model): super(PositionGeneratorNew, self).__init__() self.norm = LayerNorm(d_model) self.proj = nn.Linear(d_model, 3) def forward(self, input_0, input_1): primals_3 = self.norm.gamma primals_4 = self.norm.beta primals_5 = self.proj.weight primals_6 = self.proj.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
zhandand/MolRep
PositionGenerator
false
4,655
[ "MIT" ]
0
d81de22000f1245e1d9280af0cb329e745ce4bde
https://github.com/zhandand/MolRep/tree/d81de22000f1245e1d9280af0cb329e745ce4bde
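A shape-only sketch for the PositionGenerator row, using just the eager module (CPU is enough). With the 4-D sample inputs from get_inputs(), mask.unsqueeze(-1) broadcasts the normalized features to five dimensions before the final Linear(d_model, 3), which is why the generated call() above materializes a (4, 4, 4, 4, 4) buffer and returns a (4, 4, 4, 4, 3) view.

import torch

torch.manual_seed(0)
gen = PositionGenerator(d_model=4)
x = torch.rand(4, 4, 4, 4)
mask = torch.rand(4, 4, 4, 4)
out = gen(x, mask)
print(out.shape)    # expected: torch.Size([4, 4, 4, 4, 3])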
EnergyEstimateWidthRescale
import torch from torch import nn as nn from torch.nn.parameter import Parameter class EnergyEstimateWidthRescale(nn.Module): def __init__(self, scales): super(EnergyEstimateWidthRescale, self).__init__() self.scales = Parameter(torch.tensor(scales, dtype=torch.float32), requires_grad=False) def forward(self, x): assert x.dim() != 1 x = x / self.scales return torch.cat([(x[:, 0].detach() * x[:, 1]).unsqueeze(1), x[:, 1 :-2] * x[:, 2:-1], (x[:, -2] * x[:, -1].detach()).unsqueeze(1)], dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'scales': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 3 x0 = xindex % 16 x2 = xindex // 48 x3 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp5 / tmp7 tmp9 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp9 / tmp7 tmp11 = tmp8 * tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tmp15 = tl.full([1], 2, tl.int64) tmp16 = tmp0 < tmp15 tmp17 = tmp14 & tmp16 tmp18 = tl.load(in_ptr0 + (16 + x0 + 16 * (-1 + x1) + 64 * x2), tmp17 & xmask, other=0.0) tmp19 = tmp18 / tmp7 tmp20 = tl.load(in_ptr0 + (32 + x0 + 16 * (-1 + x1) + 64 * x2), tmp17 & xmask, other=0.0) tmp21 = tmp20 / tmp7 tmp22 = tmp19 * tmp21 tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp17, tmp22, tmp23) tmp25 = tmp0 >= tmp15 tl.full([1], 3, tl.int64) tmp28 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tmp28 / tmp7 tmp30 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tmp30 / tmp7 tmp32 = tmp29 * tmp31 tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp25, tmp32, tmp33) tmp35 = tl.where(tmp17, tmp24, tmp34) tmp36 = tl.where(tmp4, tmp13, tmp35) tl.store(out_ptr0 + x3, tmp36, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(192)](arg0_1, arg1_1, buf0, 192, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class EnergyEstimateWidthRescaleNew(nn.Module): def __init__(self, scales): super(EnergyEstimateWidthRescaleNew, self).__init__() self.scales = Parameter(torch.tensor(scales, dtype=torch.float32), requires_grad=False) def forward(self, input_0): arg1_1 = self.scales arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
zhanhuijing/ECC_PYCHARM
EnergyEstimateWidthRescale
false
4,656
[ "MIT" ]
0
c5e8fb747d70a2548e9866356f8dacc8df26a077
https://github.com/zhanhuijing/ECC_PYCHARM/tree/c5e8fb747d70a2548e9866356f8dacc8df26a077
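A behaviour sketch for the EnergyEstimateWidthRescale row, again eager-only. With scales=1.0 the rescaling divide is a no-op and the output concatenates products of neighbouring slices along dim 1, so a (4, 4, 4, 4) input produces a (4, 3, 4, 4) output.

import torch

m = EnergyEstimateWidthRescale(scales=1.0)
x = torch.rand(4, 4, 4, 4)
y = m(x)
print(y.shape)                                      # torch.Size([4, 3, 4, 4])
print(torch.allclose(y[:, 0], x[:, 0] * x[:, 1]))   # first slice: x[:, 0] * x[:, 1]
print(torch.allclose(y[:, 2], x[:, 2] * x[:, 3]))   # last slice: x[:, -2] * x[:, -1]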
Actor
import torch from torch import nn import torch.nn.functional as F class Actor(nn.Module): """Actor model Parameters: args (object): Parameter class """ def __init__(self, state_dim, action_dim, wwid): super(Actor, self).__init__() self.wwid = torch.Tensor([wwid]) l1 = 400 l2 = 300 self.f1 = nn.Linear(state_dim, l1) self.ln1 = nn.LayerNorm(l1) self.f2 = nn.Linear(l1, l2) self.ln2 = nn.LayerNorm(l2) self.w_out = nn.Linear(l2, action_dim) def forward(self, input): """Method to forward propagate through the actor's graph Parameters: input (tensor): states Returns: action (tensor): actions """ out = F.elu(self.f1(input)) out = self.ln1(out) out = F.elu(self.f2(out)) out = self.ln2(out) return torch.sigmoid(self.w_out(out)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4, 'wwid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_elu_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 400 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 400 * x0), rmask, other=0.0) tmp31 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tl.where(rmask, tmp8, 0) tmp11 = tl.broadcast_to(tmp8, [RBLOCK]) tmp13 = tl.where(rmask, tmp11, 0) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp15 = tl.full([1], 400, tl.int32) tmp16 = tmp15.to(tl.float32) tmp17 = tmp14 / tmp16 tmp18 = tmp8 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tmp24 = 400.0 tmp25 = tmp23 / tmp24 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tmp29 = tmp7 - tmp17 tmp30 = tmp29 * tmp28 tmp32 = tmp30 * tmp31 tmp34 = tmp32 + tmp33 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp28, None) tl.store(out_ptr1 + (r1 + 400 * x0), tmp34, rmask) tl.store(out_ptr0 + x0, tmp17, None) @triton.jit def triton_per_fused_elu_native_layer_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 300 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 300 * x0), rmask, other=0.0) tmp31 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tl.where(rmask, tmp8, 0) tmp11 = tl.broadcast_to(tmp8, [RBLOCK]) tmp13 = tl.where(rmask, tmp11, 0) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp15 = tl.full([1], 300, tl.int32) tmp16 = tmp15.to(tl.float32) tmp17 = tmp14 / tmp16 tmp18 = tmp8 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tmp24 = 300.0 tmp25 = tmp23 / tmp24 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tmp29 = tmp7 - tmp17 tmp30 = tmp29 * tmp28 tmp32 = tmp30 * tmp31 tmp34 = tmp32 + tmp33 
tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp28, None) tl.store(out_ptr1 + (r1 + 300 * x2 + 1216 * x3), tmp34, rmask) tl.store(out_ptr0 + x0, tmp17, None) @triton.jit def triton_poi_fused_elu_native_layer_norm_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (400,), (1,)) assert_size_stride(primals_5, (400,), (1,)) assert_size_stride(primals_6, (300, 400), (400, 1)) assert_size_stride(primals_7, (300,), (1,)) assert_size_stride(primals_8, (300,), (1,)) assert_size_stride(primals_9, (300,), (1,)) assert_size_stride(primals_10, (4, 300), (300, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 400), (6400, 1600, 400, 1), torch.float32) get_raw_stream(0) triton_per_fused_elu_native_layer_norm_0[grid(64)](buf4, buf0, primals_4, primals_5, buf1, buf5, 64, 400, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 300), (1, 400 ), 0), alpha=1, beta=1, out=buf6) del primals_7 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 buf11 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) triton_per_fused_elu_native_layer_norm_1[grid(64)](buf10, buf6, primals_8, primals_9, buf7, buf11, 64, 300, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((64, 300), (300, 1), torch.float32) triton_poi_fused_elu_native_layer_norm_view_2[grid(19200)](buf11, buf12, 19200, XBLOCK=256, num_warps=4, num_stages=1) del buf11 buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_10, (300, 4), ( 1, 300), 0), out=buf13) buf14 = reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf13 
triton_poi_fused_sigmoid_3[grid(256)](buf14, primals_11, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 return buf14, primals_4, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, buf4, reinterpret_tensor(buf5, (64, 400 ), (400, 1), 0), buf6, buf7, buf10, buf12, buf14, primals_10, primals_6 class ActorNew(nn.Module): """Actor model Parameters: args (object): Parameter class """ def __init__(self, state_dim, action_dim, wwid): super(ActorNew, self).__init__() self.wwid = torch.Tensor([wwid]) l1 = 400 l2 = 300 self.f1 = nn.Linear(state_dim, l1) self.ln1 = nn.LayerNorm(l1) self.f2 = nn.Linear(l1, l2) self.ln2 = nn.LayerNorm(l2) self.w_out = nn.Linear(l2, action_dim) def forward(self, input_0): primals_1 = self.f1.weight primals_2 = self.f1.bias primals_4 = self.ln1.weight primals_5 = self.ln1.bias primals_6 = self.f2.weight primals_7 = self.f2.bias primals_8 = self.ln2.weight primals_9 = self.ln2.bias primals_10 = self.w_out.weight primals_11 = self.w_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
zhan0903/cerl
Actor
false
4,657
[ "Apache-2.0" ]
0
6fb8aca9cb78b72947237edf2b9ed8362bd43829
https://github.com/zhan0903/cerl/tree/6fb8aca9cb78b72947237edf2b9ed8362bd43829
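A usage sketch for the Actor row (compiled class, CUDA assumed). The wwid argument is only stored on the module, and the final sigmoid keeps every action component in [0, 1].

import torch

torch.manual_seed(0)
actor = ActorNew(state_dim=4, action_dim=4, wwid=4).cuda()
state = torch.rand(4, 4, 4, 4, device="cuda")
with torch.no_grad():
    action = actor(state)
print(action.shape)                                             # torch.Size([4, 4, 4, 4])
print(action.min().item() >= 0.0, action.max().item() <= 1.0)   # bounded by the sigmoid head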
Encoder
import torch from torch import nn class Encoder(nn.Module): def __init__(self, embedding_dim, nhead, dropout, k=4): super(Encoder, self).__init__() self.transformer = nn.TransformerEncoderLayer(embedding_dim, nhead, dim_feedforward=k * embedding_dim, dropout=dropout, activation= 'gelu') def forward(self, x): x = x.transpose(0, 1) h = self.transformer(x) out = h.mean(dim=0) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'embedding_dim': 4, 'nhead': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__safe_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__safe_softmax_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 
tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tmp1 = tl.load(in_out_ptr0 + (x1 + 4 * y0), xmask & ymask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x1 + 4 * y0), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def 
triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mean_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr3 + x0, xmask) tmp9 = tl.load(in_ptr4 + x0, xmask) tmp11 = tl.load(in_ptr0 + (4 + x0), xmask) tmp12 = tl.load(in_ptr1 + 1) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp15 = tl.load(in_ptr2 + 1) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp21 = tl.load(in_ptr0 + (8 + x0), xmask) tmp22 = tl.load(in_ptr1 + 2) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp25 = tl.load(in_ptr2 + 2) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp31 = tl.load(in_ptr0 + (12 + x0), xmask) tmp32 = tl.load(in_ptr1 + 3) tmp33 = tl.broadcast_to(tmp32, [XBLOCK]) tmp35 = tl.load(in_ptr2 + 3) tmp36 = tl.broadcast_to(tmp35, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tmp3 * tmp5 tmp8 = tmp6 * tmp7 tmp10 = tmp8 + tmp9 tmp14 = tmp11 - tmp13 tmp17 = tmp14 * tmp16 tmp18 = tmp17 * tmp7 tmp19 = tmp18 + tmp9 tmp20 = tmp10 + tmp19 tmp24 = tmp21 - tmp23 tmp27 = tmp24 * tmp26 tmp28 = tmp27 * tmp7 tmp29 = tmp28 + tmp9 tmp30 = tmp20 + tmp29 tmp34 = tmp31 - tmp33 tmp37 = tmp34 * tmp36 tmp38 = tmp37 * tmp7 tmp39 = tmp38 + tmp9 tmp40 = tmp30 + tmp39 tmp41 = 4.0 tmp42 = tmp40 / tmp41 tl.store(out_ptr0 + x0, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (4, 16), (16, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), 
reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2) del primals_2 buf3 = reinterpret_tensor(buf0, (1, 4, 4, 1), (16, 1, 4, 16), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf1, (1, 4, 1, 4), (16, 1, 16, 4), 0) del buf1 triton_poi_fused_mul_1[grid(16)](buf4, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 0), 0 ), reinterpret_tensor(buf4, (4, 1, 4), (1, 0, 4), 0), out=buf5) buf6 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__safe_softmax_2[grid(64)](buf5, buf6, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__safe_softmax_3[grid(64)](buf5, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0) del buf2 triton_poi_fused_add_4[grid(16)](buf8, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (4, 4, 1), (1, 4, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 1, 4, 1), (4, 1, 1, 4), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf9, buf10, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_add_native_layer_norm_6[grid(4, 4)](buf12, primals_1, primals_5, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del primals_5 buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_native_layer_norm_7[grid(4)](buf12, buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_8[grid(16)](buf12, buf13, buf14, primals_6, primals_7, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf16 = reinterpret_tensor(buf6, (4, 16), (16, 1), 0) del buf6 extern_kernels.addmm(primals_9, buf15, reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_9 buf17 = reinterpret_tensor(buf5, (4, 16), (16, 1), 0) del buf5 triton_poi_fused_gelu_9[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf17, reinterpret_tensor(primals_10, (16, 4), (1, 16), 0), out=buf18) buf19 = buf18 del buf18 triton_poi_fused_add_10[grid(16)](buf19, buf15, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 buf20 = buf14 del buf14 buf21 = buf13 del buf13 triton_poi_fused_native_layer_norm_7[grid(4)](buf19, buf20, buf21, 4, XBLOCK=4, num_warps=1, num_stages=1) buf22 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_native_layer_norm_11[grid(4)](buf19, buf20, buf21, primals_12, primals_13, buf22, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf20 del buf21 del primals_13 return (buf22, primals_6, primals_12, 
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf7, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf12, buf15, buf16, buf17, buf19, primals_10, primals_8, primals_4, reinterpret_tensor(buf8, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 4, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 16), 0)) class EncoderNew(nn.Module): def __init__(self, embedding_dim, nhead, dropout, k=4): super(EncoderNew, self).__init__() self.transformer = nn.TransformerEncoderLayer(embedding_dim, nhead, dim_feedforward=k * embedding_dim, dropout=dropout, activation= 'gelu') def forward(self, input_0): primals_2 = self.transformer.self_attn.in_proj_weight primals_3 = self.transformer.self_attn.in_proj_bias primals_1 = self.transformer.self_attn.out_proj.weight primals_5 = self.transformer.self_attn.out_proj.bias primals_8 = self.transformer.linear1.weight primals_9 = self.transformer.linear1.bias primals_10 = self.transformer.linear2.weight primals_6 = self.transformer.linear2.bias primals_7 = self.transformer.norm1.weight primals_11 = self.transformer.norm1.bias primals_12 = self.transformer.norm2.weight primals_13 = self.transformer.norm2.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
yukiar/distil_wic
Encoder
false
4,658
[ "MIT" ]
0
1f9c5c7252105dd9f4f264f8533753f0cd08ca5b
https://github.com/yukiar/distil_wic/tree/1f9c5c7252105dd9f4f264f8533753f0cd08ca5b
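A usage sketch for the Encoder row (compiled class, CUDA assumed). The generated graph contains no dropout kernels, so any numerical comparison against the eager Encoder (dropout=0.5) is only meaningful with the eager module in eval() mode.

import torch

torch.manual_seed(0)
enc = EncoderNew(embedding_dim=4, nhead=4, dropout=0.5).cuda()
x = torch.rand(4, 4, device="cuda")    # matches get_inputs() above
with torch.no_grad():
    pooled = enc(x)
print(pooled.shape)                     # expected: torch.Size([4]), a single pooled vector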
GCN
from torch.nn import Module import math import torch from torchvision.transforms import functional as F import torch.utils.data import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.nn.modules.module import Module class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters(xariv=True) def reset_parameters(self, xariv=False): if xariv: torch.nn.init.xavier_normal_(self.weight, gain=1.0) if self.bias is not None: torch.nn.init.constant_(self.bias, 0) else: stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.mm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN(nn.Module): def __init__(self, nfeat, nhid, dropout): super(GCN, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid, bias=True) self.gc2 = GraphConvolution(nhid, nhid, bias=True) self.dropout = dropout def forward(self, input, adj): x1 = F.relu(self.gc1(input, adj)) x1 = F.dropout(x1, self.dropout, training=self.training) x2 = F.relu(self.gc2(x1, adj)) x2 = F.dropout(x2, self.dropout, training=self.training) return x2 + input def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import math import torch.utils.data import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = tmp4 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf3 del buf3 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_1[grid(16)](buf4, primals_6, primals_2, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del primals_6 return buf5, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = 
Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters(xariv=True)

    def reset_parameters(self, xariv=False):
        if xariv:
            torch.nn.init.xavier_normal_(self.weight, gain=1.0)
            if self.bias is not None:
                torch.nn.init.constant_(self.bias, 0)
        else:
            stdv = 1.0 / math.sqrt(self.weight.size(1))
            self.weight.data.uniform_(-stdv, stdv)
            if self.bias is not None:
                self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.mm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features
            ) + ' -> ' + str(self.out_features) + ')'


class GCNNew(nn.Module):

    def __init__(self, nfeat, nhid, dropout):
        super(GCNNew, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid, bias=True)
        self.gc2 = GraphConvolution(nhid, nhid, bias=True)
        self.dropout = dropout

    def forward(self, input_0, input_1):
        # In call(), primals_2 and primals_3 are the graph inputs (features and
        # adjacency) and primals_5 is the second layer's weight; map them accordingly.
        primals_1 = self.gc1.weight
        primals_4 = self.gc1.bias
        primals_5 = self.gc2.weight
        primals_6 = self.gc2.bias
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
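A sketch (not part of the generated artifact) of how the eager `GCN` and the Inductor-generated `GCNNew` could be cross-checked; it assumes a CUDA device, since `call` only runs on GPU, and compares in eval mode because the generated graph contains no dropout op.

import torch

torch.manual_seed(0)
ref = GCN(nfeat=4, nhid=4, dropout=0.5).cuda().eval()
fused = GCNNew(nfeat=4, nhid=4, dropout=0.5).cuda().eval()
fused.load_state_dict(ref.state_dict())            # identical weights for a fair comparison

features, adj = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    expected = ref(features, adj)
    actual = fused(features, adj)
print(torch.allclose(expected, actual, atol=1e-5))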
zhanwenchen/Scene-Graph-Benchmark.pytorch
GCN
false
4659
[ "MIT" ]
0
c86475bcbdaefcc1656a2890194355c2b32aa694
https://github.com/zhanwenchen/Scene-Graph-Benchmark.pytorch/tree/c86475bcbdaefcc1656a2890194355c2b32aa694
ApplySingleAttention
import torch import torch.utils.data import torch.nn as nn from torch.nn.utils import weight_norm class FCNet(nn.Module): def __init__(self, in_size, out_size, activate=None, drop=0.0): super(FCNet, self).__init__() self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None) self.drop_value = drop self.drop = nn.Dropout(drop) self.activate = activate.lower() if activate is not None else None if activate == 'relu': self.ac_fn = nn.ReLU() elif activate == 'sigmoid': self.ac_fn = nn.Sigmoid() elif activate == 'tanh': self.ac_fn = nn.Tanh() def forward(self, x): if self.drop_value > 0: x = self.drop(x) x = self.lin(x) if self.activate is not None: x = self.ac_fn(x) return x class ApplySingleAttention(nn.Module): def __init__(self, v_features, q_features, mid_features, drop=0.0): super(ApplySingleAttention, self).__init__() self.lin_v = FCNet(v_features, mid_features, activate='relu', drop=drop ) self.lin_q = FCNet(q_features, mid_features, activate='relu', drop=drop ) self.lin_atten = FCNet(mid_features, mid_features, drop=drop) def forward(self, v, q, atten): """ v = batch, num_obj, dim q = batch, que_len, dim atten: batch x v_num x q_num """ v_ = self.lin_v(v).transpose(1, 2).unsqueeze(2) q_ = self.lin_q(q).transpose(1, 2).unsqueeze(3) v_ = torch.matmul(v_, atten.unsqueeze(1)) h_ = torch.matmul(v_, q_) h_ = h_.squeeze(3).squeeze(2) atten_h = self.lin_atten(h_.unsqueeze(1)) return atten_h def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'v_features': 4, 'q_features': 4, 'mid_features': 4}]
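The core of `forward` above is a per-feature bilinear form: `h_[b, m] = sum_{i,j} lin_v(v)[b, i, m] * atten[b, i, j] * lin_q(q)[b, j, m]`, followed by `lin_atten`. A small sketch (hypothetical, eager-mode, CPU) checking that reading against the matmul chain; only the class and `get_inputs` come from the listing above.

import torch

torch.manual_seed(0)
model = ApplySingleAttention(v_features=4, q_features=4, mid_features=4).eval()
v, q, atten = get_inputs()

with torch.no_grad():
    out = model(v, q, atten)                       # (batch, 1, mid_features)
    # The same contraction written as a single einsum over the projected features.
    h = torch.einsum('bim,bij,bjm->bm', model.lin_v(v), atten, model.lin_q(q))
    manual = model.lin_atten(h.unsqueeze(1))
print(torch.allclose(out, manual, atol=1e-6))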
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn from torch.nn.utils import weight_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (), ()) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) 
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (), ()) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_div_mul_norm_0[grid(1)](buf5, primals_6, primals_5, buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), reinterpret_tensor(buf6, (4, 4), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf3, primals_3, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](primals_9, buf9, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf10 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out=buf10) buf11 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_1[grid(16, 4)](buf7, primals_7, buf11, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(buf10, reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12) buf13 = empty_strided_cuda((), (), torch.float32) buf14 = buf13 del buf13 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_div_mul_norm_0[grid(1)](buf14, primals_11, primals_10, buf15, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), reinterpret_tensor(buf15, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf16) del primals_12 buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(64)](buf7, primals_7, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf7 del primals_7 buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(64)](buf3, primals_3, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del primals_3 return (reinterpret_tensor(buf16, (4, 1, 4), (4, 4, 1), 0), buf2, buf6, buf15, primals_1, primals_2, primals_5, primals_6, primals_10, primals_11, buf1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf14, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf15, reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf11, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0), buf17, buf18) class FCNet(nn.Module): def __init__(self, in_size, out_size, activate=None, drop=0.0): 
super(FCNet, self).__init__() self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None) self.drop_value = drop self.drop = nn.Dropout(drop) self.activate = activate.lower() if activate is not None else None if activate == 'relu': self.ac_fn = nn.ReLU() elif activate == 'sigmoid': self.ac_fn = nn.Sigmoid() elif activate == 'tanh': self.ac_fn = nn.Tanh() def forward(self, x): if self.drop_value > 0: x = self.drop(x) x = self.lin(x) if self.activate is not None: x = self.ac_fn(x) return x class ApplySingleAttentionNew(nn.Module): def __init__(self, v_features, q_features, mid_features, drop=0.0): super(ApplySingleAttentionNew, self).__init__() self.lin_v = FCNet(v_features, mid_features, activate='relu', drop=drop ) self.lin_q = FCNet(q_features, mid_features, activate='relu', drop=drop ) self.lin_atten = FCNet(mid_features, mid_features, drop=drop) def forward(self, input_0, input_1, input_2): primals_3 = self.lin_v.lin.bias primals_1 = self.lin_v.lin.weight_g primals_2 = self.lin_v.lin.weight_v primals_7 = self.lin_q.lin.bias primals_5 = self.lin_q.lin.weight_g primals_6 = self.lin_q.lin.weight_v primals_12 = self.lin_atten.lin.bias primals_10 = self.lin_atten.lin.weight_g primals_11 = self.lin_atten.lin.weight_v primals_4 = input_0 primals_8 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
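In the compiled wrapper the `weight` of each weight-normalised linear layer is never stored directly; `triton_per_fused_div_mul_norm_0` rebuilds it from `weight_g` and `weight_v` as `w = g * v / ||v||`, with a single norm over the whole matrix because `FCNet` uses `weight_norm(..., dim=None)`. A purely illustrative sketch of that identity in eager PyTorch:

import torch
import torch.nn as nn
from torch.nn.utils import weight_norm

lin = weight_norm(nn.Linear(4, 4), dim=None)
g, v = lin.weight_g, lin.weight_v
rebuilt = g * v / v.norm()        # what the fused kernel computes for each layer
print(torch.allclose(rebuilt, lin.weight, atol=1e-6))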
zhanwenchen/Scene-Graph-Benchmark.pytorch
ApplySingleAttention
false
4660
[ "MIT" ]
0
c86475bcbdaefcc1656a2890194355c2b32aa694
https://github.com/zhanwenchen/Scene-Graph-Benchmark.pytorch/tree/c86475bcbdaefcc1656a2890194355c2b32aa694
Fcn8s
import torch import numpy as np import torch.nn as nn def _upsampling_weights(in_channels, out_channels, kernel_size): factor = (kernel_size + 1) // 2 if kernel_size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:kernel_size, :kernel_size] filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64) weight[range(in_channels), range(out_channels), :, :] = filt return torch.from_numpy(weight).float() class Fcn8s(nn.Module): def __init__(self, n_class=21): """ Create the FCN-8s network the the given number of classes. Args: n_class: The number of semantic classes. """ super(Fcn8s, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100) self.relu1_1 = nn.ReLU(inplace=True) self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1) self.relu1_2 = nn.ReLU(inplace=True) self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1) self.relu2_1 = nn.ReLU(inplace=True) self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1) self.relu2_2 = nn.ReLU(inplace=True) self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1) self.relu3_1 = nn.ReLU(inplace=True) self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1) self.relu3_2 = nn.ReLU(inplace=True) self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1) self.relu3_3 = nn.ReLU(inplace=True) self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1) self.relu4_1 = nn.ReLU(inplace=True) self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1) self.relu4_2 = nn.ReLU(inplace=True) self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1) self.relu4_3 = nn.ReLU(inplace=True) self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1) self.relu5_1 = nn.ReLU(inplace=True) self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1) self.relu5_2 = nn.ReLU(inplace=True) self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1) self.relu5_3 = nn.ReLU(inplace=True) self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.fc6 = nn.Conv2d(512, 4096, 7) self.relu6 = nn.ReLU(inplace=True) self.drop6 = nn.Dropout2d() self.fc7 = nn.Conv2d(4096, 4096, 1) self.relu7 = nn.ReLU(inplace=True) self.drop7 = nn.Dropout2d() self.score_fr = nn.Conv2d(4096, n_class, 1) self.score_pool3 = nn.Conv2d(256, n_class, 1) self.score_pool4 = nn.Conv2d(512, n_class, 1) self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 4, stride=2, bias=True) self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8, bias=False) self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 4, stride =2, bias=False) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.zero_() if m.bias is not None: m.bias.data.zero_() if isinstance(m, nn.ConvTranspose2d): assert m.kernel_size[0] == m.kernel_size[1] initial_weight = _upsampling_weights(m.in_channels, m. 
out_channels, m.kernel_size[0]) m.weight.data.copy_(initial_weight) def forward(self, image): h = self.relu1_1(self.conv1_1(image)) h = self.relu1_2(self.conv1_2(h)) h = self.pool1(h) h = self.relu2_1(self.conv2_1(h)) h = self.relu2_2(self.conv2_2(h)) h = self.pool2(h) h = self.relu3_1(self.conv3_1(h)) h = self.relu3_2(self.conv3_2(h)) h = self.relu3_3(self.conv3_3(h)) h = self.pool3(h) pool3 = h h = self.relu4_1(self.conv4_1(h)) h = self.relu4_2(self.conv4_2(h)) h = self.relu4_3(self.conv4_3(h)) h = self.pool4(h) pool4 = h h = self.relu5_1(self.conv5_1(h)) h = self.relu5_2(self.conv5_2(h)) h = self.relu5_3(self.conv5_3(h)) h = self.pool5(h) h = self.relu6(self.fc6(h)) h = self.drop6(h) h = self.relu7(self.fc7(h)) h = self.drop7(h) h = self.score_fr(h) h = self.upscore2(h) upscore2 = h h = self.score_pool4(pool4) h = h[:, :, 5:5 + upscore2.size()[2], 5:5 + upscore2.size()[3]] score_pool4c = h h = upscore2 + score_pool4c h = self.upscore_pool4(h) upscore_pool4 = h h = self.score_pool3(pool3) h = h[:, :, 9:9 + upscore_pool4.size()[2], 9:9 + upscore_pool4.size ()[3]] score_pool3c = h h = upscore_pool4 + score_pool3c h = self.upscore8(h) h = h[:, :, 31:31 + image.size()[2], 31:31 + image.size()[3] ].contiguous() return h def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
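`_upsampling_weights` initialises each transposed convolution with a bilinear-interpolation kernel: every output channel upsamples its matching input channel and all cross-channel taps stay zero. A quick illustrative check (not part of the original listing) of the 4x4 case used by `upscore2`:

import torch

w = _upsampling_weights(in_channels=21, out_channels=21, kernel_size=4)
print(w.shape)                 # torch.Size([21, 21, 4, 4])
print(w[0, 0])                 # the separable bilinear filter:
# tensor([[0.0625, 0.1875, 0.1875, 0.0625],
#         [0.1875, 0.5625, 0.5625, 0.1875],
#         [0.1875, 0.5625, 0.5625, 0.1875],
#         [0.0625, 0.1875, 0.1875, 0.0625]])
print(w[0, 1].abs().sum())     # tensor(0.) -- cross-channel weights are zero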
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 49 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 441 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 21 y1 = yindex // 21 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 21 * x2 + 336 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 441 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 21 y1 = yindex // 21 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 21 * x2 + 5376 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 17572864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4393216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 131 x2 = xindex // 8384 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 33536 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 33536 * x2), xmask) tmp3 = tl.load(in_ptr0 + (16768 + x0 + 128 * x1 + 33536 * x2), xmask) tmp5 = tl.load(in_ptr0 + (16832 + x0 + 128 * x1 + 33536 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8786432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 8448 % 66 x1 = xindex // 128 % 66 x0 = xindex % 128 x3 = xindex // 557568 x6 = xindex tmp0 = 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = 
tl.full([1], 131, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp10, other=float('-inf')) tmp12 = 1 + 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x2 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (16768 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (16896 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp26, other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + x6, tmp28, None) tl.store(out_ptr1 + x6, tmp38, None) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = xindex // 256 % 33 x2 = xindex // 8448 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 33792 * x2), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 33792 * x2), xmask) tmp3 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 33792 * x2), xmask) tmp5 = tl.load(in_ptr0 + (17152 + x0 + 512 * x1 + 33792 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 8704 % 17 x1 = xindex // 512 % 17 x0 = xindex % 512 x3 = xindex // 147968 x6 = xindex tmp0 = 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 33, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp10, other=float('-inf')) tmp12 = 1 + 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x2 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (16896 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (17408 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp26, other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + x6, tmp28, None) tl.store(out_ptr1 + x6, tmp38, None) @triton.jit def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 4608 % 9 x1 = xindex // 512 % 9 x0 = xindex % 512 x3 = xindex // 41472 x6 = xindex tmp0 = 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 17, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp10, other=float('-inf')) tmp12 = 1 + 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x2 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (8704 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (9216 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp26, other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = 
tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + x6, tmp28, None) tl.store(out_ptr1 + x6, tmp38, None) @triton.jit def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 756 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 21 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_convolution_24(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 5376 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 21 x2 = xindex // 168 % 8 x3 = xindex // 1344 x5 = xindex % 168 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1890 + x5 + 357 * x2 + 6069 * x3), xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 27216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x2 = xindex // 378 % 18 x3 = xindex // 6804 x5 = xindex % 378 x0 = xindex % 21 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (6426 + x5 + 693 * x2 + 22869 * x3), xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_clone_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl .constexpr, XBLOCK: tl.constexpr): ynumel = 84 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex % 64 x3 = xindex // 64 y0 = yindex % 21 y1 = yindex // 21 x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + (99603 + y0 + 21 * x2 + 3192 * x3 + 485184 * y1), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x5 + 4096 * y4), tmp0, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 
9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1)) assert_size_stride(primals_29, (4096,), (1,)) assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_31, (4096,), (1,)) assert_size_stride(primals_32, (21, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_33, (21,), (1,)) assert_size_stride(primals_34, (21, 21, 4, 4), (336, 16, 4, 1)) assert_size_stride(primals_35, (21,), (1,)) assert_size_stride(primals_36, (21, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_37, (21,), (1,)) assert_size_stride(primals_38, (21, 21, 4, 4), (336, 16, 4, 1)) assert_size_stride(primals_39, (21, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_40, (21,), (1,)) assert_size_stride(primals_41, (21, 21, 16, 16), (5376, 256, 16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. 
float32) triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512), torch.float32) triton_poi_fused_9[grid(2097152, 49)](primals_28, buf14, 2097152, 49, XBLOCK=32, YBLOCK=64, num_warps=8, num_stages=1) del primals_28 buf15 = empty_strided_cuda((21, 21, 4, 4), (336, 1, 84, 21), torch. float32) triton_poi_fused_10[grid(441, 16)](primals_34, buf15, 441, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_34 buf16 = empty_strided_cuda((21, 21, 4, 4), (336, 1, 84, 21), torch. 
float32) triton_poi_fused_10[grid(441, 16)](primals_38, buf16, 441, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_38 buf17 = empty_strided_cuda((21, 21, 16, 16), (5376, 1, 336, 21), torch.float32) triton_poi_fused_11[grid(441, 256)](primals_41, buf17, 441, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_41 buf18 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(100, 100), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_12[grid(17572864)](buf19, primals_2, 17572864, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_12[grid(17572864)](buf21, primals_5, 17572864, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64 ), torch.float32) buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64 ), torch.int8) triton_poi_fused_max_pool2d_with_indices_13[grid(4393216)](buf21, buf22, buf23, 4393216, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_14[grid(8786432)](buf25, primals_7, 8786432, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_14[grid(8786432)](buf27, primals_9, 8786432, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.float32) buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_15[grid(2230272)](buf27, buf28, buf29, 2230272, XBLOCK=512, num_warps=8, num_stages=1) buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_16[grid(4460544)](buf31, primals_11, 4460544, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf33 = buf32 del buf32 triton_poi_fused_convolution_relu_16[grid(4460544)](buf33, primals_13, 4460544, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf35 = buf34 del buf34 
triton_poi_fused_convolution_relu_16[grid(4460544)](buf35, primals_15, 4460544, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.float32) buf37 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_17[grid(1115136)](buf35, buf36, buf37, 1115136, XBLOCK=512, num_warps=8, num_stages=1) buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_18[grid(2230272)](buf39, primals_17, 2230272, XBLOCK=512, num_warps=8, num_stages=1) del primals_17 buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf41 = buf40 del buf40 triton_poi_fused_convolution_relu_18[grid(2230272)](buf41, primals_19, 2230272, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf43 = buf42 del buf42 triton_poi_fused_convolution_relu_18[grid(2230272)](buf43, primals_21, 2230272, XBLOCK=512, num_warps=8, num_stages=1) del primals_21 buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.float32) buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.int8) triton_poi_fused_max_pool2d_with_indices_19[grid(591872)](buf43, buf44, buf45, 591872, XBLOCK=1024, num_warps=4, num_stages=1) buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf47 = buf46 del buf46 triton_poi_fused_convolution_relu_20[grid(591872)](buf47, primals_23, 591872, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf49 = buf48 del buf48 triton_poi_fused_convolution_relu_20[grid(591872)](buf49, primals_25, 591872, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf51 = buf50 del buf50 triton_poi_fused_convolution_relu_20[grid(591872)](buf51, primals_27, 591872, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.float32) buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.int8) triton_poi_fused_max_pool2d_with_indices_21[grid(165888)](buf51, buf52, buf53, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096)) buf55 = buf54 
del buf54 triton_poi_fused_convolution_relu_22[grid(147456)](buf55, primals_29, 147456, XBLOCK=512, num_warps=8, num_stages=1) del primals_29 buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096)) buf57 = buf56 del buf56 triton_poi_fused_convolution_relu_22[grid(147456)](buf57, primals_31, 147456, XBLOCK=512, num_warps=8, num_stages=1) del primals_31 buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 21, 3, 3), (189, 1, 63, 21)) buf59 = buf58 del buf58 triton_poi_fused_convolution_23[grid(756)](buf59, primals_33, 756, XBLOCK=256, num_warps=4, num_stages=1) del primals_33 buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 21, 8, 8), (1344, 1, 168, 21)) buf61 = extern_kernels.convolution(buf44, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 21, 17, 17), (6069, 1, 357, 21)) buf62 = buf60 del buf60 triton_poi_fused_add_convolution_24[grid(5376)](buf62, primals_35, buf61, primals_37, 5376, XBLOCK=256, num_warps=4, num_stages=1) del buf61 del primals_35 del primals_37 buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf63, (4, 21, 18, 18), (6804, 1, 378, 21)) buf64 = extern_kernels.convolution(buf36, primals_39, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 21, 33, 33), (22869, 1, 693, 21)) buf65 = buf63 del buf63 triton_poi_fused_add_25[grid(27216)](buf65, buf64, primals_40, 27216, XBLOCK=256, num_warps=4, num_stages=1) del buf64 del primals_40 buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 21, 152, 152), (485184, 1, 3192, 21)) buf67 = empty_strided_cuda((4, 21, 64, 64), (86016, 4096, 64, 1), torch.float32) triton_poi_fused_clone_26[grid(84, 4096)](buf66, buf67, 84, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf66 return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32, buf15, primals_36, buf16, primals_39, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36, buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51, buf52, buf53, buf55, buf57, buf59, buf62, buf65) def _upsampling_weights(in_channels, out_channels, kernel_size): factor = (kernel_size + 1) // 2 if kernel_size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:kernel_size, :kernel_size] filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64) weight[range(in_channels), range(out_channels), :, :] = filt return torch.from_numpy(weight).float() class Fcn8sNew(nn.Module): def __init__(self, n_class=21): """ Create the FCN-8s network the the given number of 
classes. Args: n_class: The number of semantic classes. """ super(Fcn8sNew, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100) self.relu1_1 = nn.ReLU(inplace=True) self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1) self.relu1_2 = nn.ReLU(inplace=True) self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1) self.relu2_1 = nn.ReLU(inplace=True) self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1) self.relu2_2 = nn.ReLU(inplace=True) self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1) self.relu3_1 = nn.ReLU(inplace=True) self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1) self.relu3_2 = nn.ReLU(inplace=True) self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1) self.relu3_3 = nn.ReLU(inplace=True) self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1) self.relu4_1 = nn.ReLU(inplace=True) self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1) self.relu4_2 = nn.ReLU(inplace=True) self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1) self.relu4_3 = nn.ReLU(inplace=True) self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1) self.relu5_1 = nn.ReLU(inplace=True) self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1) self.relu5_2 = nn.ReLU(inplace=True) self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1) self.relu5_3 = nn.ReLU(inplace=True) self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.fc6 = nn.Conv2d(512, 4096, 7) self.relu6 = nn.ReLU(inplace=True) self.drop6 = nn.Dropout2d() self.fc7 = nn.Conv2d(4096, 4096, 1) self.relu7 = nn.ReLU(inplace=True) self.drop7 = nn.Dropout2d() self.score_fr = nn.Conv2d(4096, n_class, 1) self.score_pool3 = nn.Conv2d(256, n_class, 1) self.score_pool4 = nn.Conv2d(512, n_class, 1) self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 4, stride=2, bias=True) self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8, bias=False) self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 4, stride =2, bias=False) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.zero_() if m.bias is not None: m.bias.data.zero_() if isinstance(m, nn.ConvTranspose2d): assert m.kernel_size[0] == m.kernel_size[1] initial_weight = _upsampling_weights(m.in_channels, m. 
out_channels, m.kernel_size[0])
                m.weight.data.copy_(initial_weight)

    def forward(self, input_0):
        primals_1 = self.conv1_1.weight
        primals_2 = self.conv1_1.bias
        primals_4 = self.conv1_2.weight
        primals_5 = self.conv1_2.bias
        primals_6 = self.conv2_1.weight
        primals_7 = self.conv2_1.bias
        primals_8 = self.conv2_2.weight
        primals_9 = self.conv2_2.bias
        primals_10 = self.conv3_1.weight
        primals_11 = self.conv3_1.bias
        primals_12 = self.conv3_2.weight
        primals_13 = self.conv3_2.bias
        primals_14 = self.conv3_3.weight
        primals_15 = self.conv3_3.bias
        primals_16 = self.conv4_1.weight
        primals_17 = self.conv4_1.bias
        primals_18 = self.conv4_2.weight
        primals_19 = self.conv4_2.bias
        primals_20 = self.conv4_3.weight
        primals_21 = self.conv4_3.bias
        primals_22 = self.conv5_1.weight
        primals_23 = self.conv5_1.bias
        primals_24 = self.conv5_2.weight
        primals_25 = self.conv5_2.bias
        primals_26 = self.conv5_3.weight
        primals_27 = self.conv5_3.bias
        primals_28 = self.fc6.weight
        primals_29 = self.fc6.bias
        primals_30 = self.fc7.weight
        primals_31 = self.fc7.bias
        primals_32 = self.score_fr.weight
        primals_33 = self.score_fr.bias
        # primals_34..41 follow the order the decode-head parameters are used in
        # call(): primals_35 is the upscore2 bias and primals_40 the score_pool3
        # bias, matching the fused add kernels above.
        primals_34 = self.upscore2.weight
        primals_35 = self.upscore2.bias
        primals_36 = self.score_pool4.weight
        primals_37 = self.score_pool4.bias
        primals_38 = self.upscore_pool4.weight
        primals_39 = self.score_pool3.weight
        primals_40 = self.score_pool3.bias
        primals_41 = self.upscore8.weight
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40, primals_41])
        return output[0]
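Unlike the eager model, the generated `call` hard-codes every intermediate spatial size for 64x64 inputs (262 after the 100-padded conv1_1, then 131/66/33/17/9 through the ceil-mode pools and 3 after fc6, with 8 -> 18 -> 152 through the transposed convolutions before the final 31-offset crop back to 64), so `Fcn8sNew` only accepts 4x3x64x64 batches. A small, purely illustrative shape check of the eager module on CPU:

import torch

model = Fcn8s(n_class=21).eval()
with torch.no_grad():
    out = model(torch.rand(1, 3, 64, 64))
print(out.shape)     # torch.Size([1, 21, 64, 64]) -- the final crop restores the input resolution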
jgibson2/crfasrnn_pytorch
Fcn8s
false
4,661
[ "MIT" ]
0
04c8477343bc1a186b3712f876b497f00e43ae72
https://github.com/jgibson2/crfasrnn_pytorch/tree/04c8477343bc1a186b3712f876b497f00e43ae72
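Note on the Fcn8s entry above: _initialize_weights fills every ConvTranspose2d from a helper called _upsampling_weights, whose body appears earlier in the entry. For reference, the sketch below shows the standard bilinear-interpolation initializer that FCN implementations typically use for this purpose; the function name and the assumption in_channels == out_channels (true here, since both equal n_class) are mine, not taken from the entry.

import numpy as np
import torch

def bilinear_upsampling_weights(in_channels, out_channels, kernel_size):
    # Hypothetical stand-in for _upsampling_weights: builds a bilinear kernel and
    # places it on the channel diagonal (channel i -> channel i), so it assumes
    # in_channels == out_channels, as in the Fcn8s upscore layers.
    factor = (kernel_size + 1) // 2
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)
    weight[range(in_channels), range(out_channels), :, :] = filt
    return torch.from_numpy(weight).float()

print(bilinear_upsampling_weights(1, 1, 4).squeeze())  # 4x4 kernel, values peak at 0.5625 in the middle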
BiaffineAttention
import torch import torch.nn as nn from torch.nn import Module as Layer class BiaffineAttention(Layer): """Implements a biaffine attention operator for binary relation classification.""" def __init__(self, in_features, out_features): super(BiaffineAttention, self).__init__() self.in_features = in_features self.out_features = out_features self.bilinear = nn.Bilinear(in_features, in_features, out_features, bias=False) self.linear = nn.Linear(2 * in_features, out_features) def forward(self, x_1, x_2): return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=-1)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Module as Layer assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_3, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor( primals_2, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_3, primals_2, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf3) del primals_4 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_5 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (64, 8), (8, 1), 0) class BiaffineAttentionNew(Layer): """Implements a biaffine attention operator for binary relation classification.""" def __init__(self, in_features, out_features): super(BiaffineAttentionNew, self).__init__() self.in_features = in_features self.out_features = out_features self.bilinear = nn.Bilinear(in_features, in_features, out_features, bias=False) self.linear = nn.Linear(2 * in_features, out_features) def forward(self, input_0, input_1): primals_1 = self.bilinear.weight primals_4 = self.linear.weight primals_5 = self.linear.bias primals_2 = 
input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
verages/PaddleOCR2Pytorch
BiaffineAttention
false
4,662
[ "Apache-2.0" ]
0
201f0d5d6007f49620c49af7d222c3b220eb3e70
https://github.com/verages/PaddleOCR2Pytorch/tree/201f0d5d6007f49620c49af7d222c3b220eb3e70
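A quick way to sanity-check the generated BiaffineAttentionNew against the eager BiaffineAttention above is to copy the parameters across and compare outputs. This is only a sketch: it assumes both classes are in scope and that a CUDA device is available, since call() asserts CUDA strides and launches Triton kernels.

import torch

torch.manual_seed(0)
ref = BiaffineAttention(in_features=4, out_features=4).cuda()
opt = BiaffineAttentionNew(in_features=4, out_features=4).cuda()
opt.load_state_dict(ref.state_dict())  # same bilinear/linear parameters in both modules
x1 = torch.rand(4, 4, 4, 4, device='cuda')
x2 = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x1, x2), opt(x1, x2), atol=1e-5)  # small tolerance for the fused kernels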
StateAttention
import torch import torch.nn as nn class StateAttention(nn.Module): def __init__(self): super(StateAttention, self).__init__() self.sm = nn.Softmax(dim=1) def forward(self, a_t, r_t, input_embedding, padded_mask): new_a_t = torch.zeros_like(a_t) for i in range(a_t.shape[1]): if i == 0: new_a_t[:, i] = a_t[:, 0] * r_t[:, 0] else: window = a_t[:, i - 1:i + 1] window_sum = window[:, 0] * r_t[:, 1] + window[:, 1] * r_t[:, 0 ] new_a_t[:, i - 1] += (1 - padded_mask[:, i]) * window_sum new_a_t[:, i] += padded_mask[:, i] * window_sum new_a_t = new_a_t.unsqueeze(dim=1) output = torch.matmul(new_a_t, input_embedding).squeeze(dim=1) return output, new_a_t.squeeze(dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp2 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp28 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp31 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp0 = tl.full([1], 0, tl.int32) tmp1 = tmp0 == tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = tl.where(tmp1, tmp4, tmp5) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp11 = tmp2 * tmp10 tmp13 = tmp12 * tmp3 tmp14 = tmp11 + tmp13 tmp15 = tmp9 * tmp14 tmp16 = tmp6 + tmp15 tmp17 = tl.full([1], 1, tl.int32) tmp18 = tmp17 == tmp0 tmp19 = tl.where(tmp1, tmp16, tmp6) tmp20 = tl.where(tmp18, tmp4, tmp5) tmp21 = tl.where(tmp18, tmp16, tmp20) tmp22 = tl.where(tmp18, tmp19, tmp21) tmp23 = tmp7 * tmp14 tmp24 = tmp22 + tmp23 tmp25 = tmp17 == tmp17 tmp26 = tl.where(tmp25, tmp24, tmp22) tmp27 = tl.where(tmp25, tmp26, tmp26) tmp29 = tmp8 - tmp28 tmp30 = tmp12 * tmp10 tmp32 = tmp31 * tmp3 tmp33 = tmp30 + tmp32 tmp34 = tmp29 * tmp33 tmp35 = tmp27 + tmp34 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp24, xmask) tl.store(out_ptr2 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_add_copy_mul_rsub_zeros_like_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tmp1 == tmp1 tmp6 = tl.full([1], 0, tl.int32) tmp7 = tmp1 == tmp6 tmp8 = tmp6 == tmp6 tmp12 = tmp10 * tmp11 tmp13 = 0.0 tmp14 = tl.where(tmp8, tmp12, tmp13) tmp15 = tl.where(tmp8, tmp9, tmp14) tmp16 = tl.where(tmp7, tmp12, tmp13) tmp17 = tl.where(tmp7, tmp9, tmp16) tmp18 = tl.where(tmp7, tmp15, tmp17) tmp19 = tl.where(tmp4, tmp5, tmp18) tmp20 = tmp0 == tmp6 tmp21 = tl.where(tmp20, tmp12, tmp13) tmp22 = tl.where(tmp20, tmp9, tmp21) tmp23 = tl.where(tmp20, tmp15, tmp22) tmp24 = tl.where(tmp2, tmp5, tmp23) tmp25 = tl.where(tmp2, tmp19, tmp24) tmp26 = tl.where(tmp2, tmp3, tmp25) tl.store(out_ptr0 + x3, tmp26, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr3 + (16 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr3 + (x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp0 = tl.full([1], 2, tl.int32) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp9 = tmp7 * tmp8 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tmp6 * tmp13 tmp15 = tmp5 + tmp14 tmp16 = tmp0 == tmp0 tmp17 = tl.where(tmp16, tmp15, tmp5) tmp18 = tl.where(tmp16, tmp17, tmp17) tmp20 = 1.0 tmp21 = tmp20 - tmp19 tmp22 = tmp10 * tmp8 tmp24 = tmp23 * tmp11 tmp25 = tmp22 + tmp24 tmp26 = tmp21 * tmp25 tmp27 = tmp18 + tmp26 tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp27, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr2 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr2 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tmp1 == tmp1 tmp6 = tl.full([1], 1, tl.int32) tmp7 = tmp1 == tmp6 tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tl.where(tmp4, tmp5, tmp10) tmp12 = tmp0 == tmp6 tmp14 = tl.where(tmp12, tmp8, tmp13) tmp15 = tl.where(tmp2, tmp5, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tmp17 = tl.where(tmp2, tmp3, tmp16) tl.store(out_ptr0 + x3, tmp17, xmask) @triton.jit def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr3 + (16 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr3 + (x0 + 64 * x1), xmask) tmp0 = tl.full([1], 3, tl.int32) tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp9 = tmp7 * tmp8 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tmp6 * tmp13 tmp15 = tmp5 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x3 = xindex // 256 x5 = xindex % 64 x6 = xindex tmp4 = tl.load(in_ptr0 + (x0 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + 
(32 + x0 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (48 + x0 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp0 = x1 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tmp1 == tmp1 tmp5 = tl.full([1], 2, tl.int32) tmp6 = tmp1 == tmp5 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tl.where(tmp3, tmp4, tmp9) tmp11 = tmp0 == tmp5 tmp13 = tl.where(tmp11, tmp7, tmp12) tmp14 = tl.where(tmp2, tmp4, tmp13) tmp15 = tl.where(tmp2, tmp10, tmp14) tl.store(out_ptr0 + x6, tmp15, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp4 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tmp1 == tmp1 tmp5 = tl.full([1], 2, tl.int32) tmp6 = tmp1 == tmp5 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tl.where(tmp3, tmp4, tmp9) tmp11 = tmp0 == tmp5 tmp13 = tl.where(tmp11, tmp7, tmp12) tmp14 = tl.where(tmp2, tmp4, tmp13) tmp15 = tl.where(tmp2, tmp10, tmp14) tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_0[grid(64)](arg0_1, arg1_1, arg2_1, buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_copy_mul_rsub_zeros_like_1[grid(256)](buf2, buf1, buf0, arg0_1, arg1_1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 buf4 = buf2 del buf2 buf5 = buf1 del buf1 triton_poi_fused_add_mul_rsub_2[grid(64)](buf3, arg2_1, arg0_1, arg1_1, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_3[grid(256)](buf5, buf4, buf3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 buf7 = buf5 del buf5 triton_poi_fused_add_mul_4[grid(64)](buf6, arg2_1, arg0_1, arg1_1, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(1024)](buf7, buf6, buf8, 1024, XBLOCK =256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 
64, 16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(1024)](arg3_1, buf9, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg3_1 buf10 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (64, 4, 4), (16, 4, 1), 0), out=buf10) del buf8 del buf9 buf11 = buf3 del buf3 triton_poi_fused_add_mul_7[grid(256)](buf7, buf6, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del buf7 return reinterpret_tensor(buf10, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0 ), buf11 class StateAttentionNew(nn.Module): def __init__(self): super(StateAttentionNew, self).__init__() self.sm = nn.Softmax(dim=1) def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
zhangyuejoslin/selfmonitoring-agent
StateAttention
false
4,663
[ "MIT" ]
0
9401ceb492f6c4576d62404b62e815d184136b24
https://github.com/zhangyuejoslin/selfmonitoring-agent/tree/9401ceb492f6c4576d62404b62e815d184136b24
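The shapes StateAttention returns are easy to misread: new_a_t.unsqueeze(dim=1) makes the matmul broadcast over an extra leading dimension, and with the 4x4x4x4 test tensors both squeeze(dim=1) calls are no-ops, which is why the optimized call() hands back a 5-D tensor. A minimal sketch, assuming StateAttention from above is in scope:

import torch

torch.manual_seed(0)
att = StateAttention()
a_t, r_t, emb, mask = (torch.rand(4, 4, 4, 4) for _ in range(4))
out, new_a_t = att(a_t, r_t, emb, mask)
print(out.shape)      # torch.Size([4, 4, 4, 4, 4]) -- broadcasted matmul; squeeze(dim=1) removes nothing
print(new_a_t.shape)  # torch.Size([4, 4, 4, 4])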
C1
import torch import torch.nn as nn from collections import OrderedDict class C1(nn.Module): def __init__(self): super(C1, self).__init__() self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, img): output = self.c1(img) return output def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 86400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 6 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 21600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x2 = xindex // 900 x5 = xindex x4 = xindex // 5400 x6 = xindex % 5400 tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 5504 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(86400)](buf0, primals_2, buf1, 86400, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 6, 30, 30), (5400, 900, 30, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(21600)](buf1, buf2, buf3, 21600, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C1New(nn.Module): def __init__(self): super(C1New, self).__init__() self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', 
nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c1.c1.weight primals_2 = self.c1.c1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
zjgbz/img_cls
C1
false
4,664
[ "MIT" ]
0
513d5ae423d95e008a82a6ffe443db49f8ed9ac2
https://github.com/zjgbz/img_cls/tree/513d5ae423d95e008a82a6ffe443db49f8ed9ac2
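A minimal usage sketch for the C1 entry above, assuming the class is in scope: the 5x5 valid convolution shrinks the 64x64 input to 60x60 and the stride-2 max pool halves it again.

import torch

m = C1()
out = m(torch.rand(4, 1, 64, 64))
print(out.shape)  # torch.Size([4, 6, 30, 30]) -- matches the (5400, 900, 30, 1) strides in the Triton code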
SEModule
import torch import torch.nn as nn import torch.nn.functional as F def hard_sigmoid(x, slope=0.1666667, offset=0.5): return torch.clamp(slope * x + offset, 0.0, 1.0) class SEModule(nn.Module): def __init__(self, in_channels, reduction=4, name=''): super(SEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True) self.conv2 = nn.Conv2d(in_channels=in_channels // reduction, out_channels=in_channels, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, inputs): outputs = self.avg_pool(inputs) outputs = self.conv1(outputs) outputs = F.relu(outputs) outputs = self.conv2(outputs) outputs = hard_sigmoid(outputs, slope=0.2, offset=0.5) return inputs * outputs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_clamp_convolution_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.2 tmp5 = tmp3 * tmp4 tmp6 = 0.5 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = 1.0 tmp11 = triton_helpers.minimum(tmp9, tmp10) tmp12 = tmp0 * tmp11 tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_add_convolution_ge_le_logical_and_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.2 tmp4 = tmp2 * tmp3 tmp5 = 0.5 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = tmp6 >= tmp7 tmp9 = 1.0 tmp10 = tmp6 <= tmp9 tmp11 = tmp8 & tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = 
extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_clamp_convolution_mul_2[grid(256)](primals_1, buf4, primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) triton_poi_fused_add_convolution_ge_le_logical_and_mul_3[grid(16)](buf4 , primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del primals_5 return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6 def hard_sigmoid(x, slope=0.1666667, offset=0.5): return torch.clamp(slope * x + offset, 0.0, 1.0) class SEModuleNew(nn.Module): def __init__(self, in_channels, reduction=4, name=''): super(SEModuleNew, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True) self.conv2 = nn.Conv2d(in_channels=in_channels // reduction, out_channels=in_channels, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
verages/PaddleOCR2Pytorch
SEModule
false
4,665
[ "Apache-2.0" ]
0
201f0d5d6007f49620c49af7d222c3b220eb3e70
https://github.com/verages/PaddleOCR2Pytorch/tree/201f0d5d6007f49620c49af7d222c3b220eb3e70
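In the SEModule entry above, hard_sigmoid is called with slope=0.2 (overriding its 1/6 default), so the whole forward is: global average pool, 1x1 squeeze conv, ReLU, 1x1 excite conv, clamp(0.2*x + 0.5, 0, 1), then channel-wise rescaling of the input. A minimal sketch spelling that out, assuming SEModule from above is in scope:

import torch

torch.manual_seed(0)
se = SEModule(in_channels=4)
x = torch.rand(4, 4, 4, 4)
gate = torch.clamp(0.2 * se.conv2(torch.relu(se.conv1(se.avg_pool(x)))) + 0.5, 0.0, 1.0)
assert torch.allclose(se(x), x * gate)  # the gate broadcasts from (N, C, 1, 1) over H and W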
BiAttention
import torch from torchvision.transforms import functional as F import torch.utils.data import torch.nn as nn import torch.nn.functional as F from torch.nn.utils import weight_norm class FCNet(nn.Module): def __init__(self, in_size, out_size, activate=None, drop=0.0): super(FCNet, self).__init__() self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None) self.drop_value = drop self.drop = nn.Dropout(drop) self.activate = activate.lower() if activate is not None else None if activate == 'relu': self.ac_fn = nn.ReLU() elif activate == 'sigmoid': self.ac_fn = nn.Sigmoid() elif activate == 'tanh': self.ac_fn = nn.Tanh() def forward(self, x): if self.drop_value > 0: x = self.drop(x) x = self.lin(x) if self.activate is not None: x = self.ac_fn(x) return x class BiAttention(nn.Module): def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0 ): super(BiAttention, self).__init__() self.hidden_aug = 3 self.glimpses = glimpses self.lin_v = FCNet(v_features, int(mid_features * self.hidden_aug), activate='relu', drop=drop / 2.5) self.lin_q = FCNet(q_features, int(mid_features * self.hidden_aug), activate='relu', drop=drop / 2.5) self.h_weight = nn.Parameter(torch.Tensor(1, glimpses, 1, int( mid_features * self.hidden_aug)).normal_()) self.h_bias = nn.Parameter(torch.Tensor(1, glimpses, 1, 1).normal_()) self.drop = nn.Dropout(drop) def forward(self, v, q): """ v = batch, num_obj, dim q = batch, que_len, dim """ v_num = v.size(1) q_num = q.size(1) batch_num = v.size(0) v_ = self.lin_v(v).unsqueeze(1) q_ = self.lin_q(q).unsqueeze(1) v_ = self.drop(v_) h_ = v_ * self.h_weight logits = torch.matmul(h_, q_.transpose(2, 3)) logits = logits + self.h_bias atten = F.softmax(logits.view(batch_num, self.glimpses, v_num * q_num), 2) return atten.view(batch_num, self.glimpses, v_num, q_num) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'v_features': 4, 'q_features': 4, 'mid_features': 4, 'glimpses': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch.nn as nn from torch.nn.utils import weight_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp7 = tl.load(in_ptr1 + 0) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp9 = tmp8 / tmp6 tmp10 = tmp0 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp10, rmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 192 x4 = xindex % 48 x0 = xindex % 12 x2 = xindex // 48 % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 48 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 12 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 12 x3 = xindex // 192 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 12 * x0 + 48 * x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_per_fused__softmax_3(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tmp8 / tmp12 tl.store(out_ptr2 + (r2 + 16 * x3), tmp13, xmask) @triton.jit def 
triton_poi_fused_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 12 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (), ()) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (), ()) assert_size_stride(primals_7, (12, 4), (4, 1)) assert_size_stride(primals_8, (12,), (1,)) assert_size_stride(primals_9, (1, 4, 1, 12), (48, 12, 12, 1)) assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((12, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_4, primals_3, buf2, 1, 48, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((12, 4), (4, 1), torch.float32) triton_per_fused_div_mul_norm_0[grid(1)](buf5, primals_7, primals_6, buf6, 1, 48, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf6, (4, 12), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. 
float32) triton_poi_fused_mul_1[grid(768)](buf3, primals_9, buf8, 768, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 12, 4), (192, 48, 4, 1), torch.float32 ) triton_poi_fused_clone_2[grid(768)](buf7, primals_8, buf9, 768, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 12), (48, 12, 1 ), 0), reinterpret_tensor(buf9, (16, 12, 4), (48, 4, 1), 0), out=buf10) buf13 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_per_fused__softmax_3[grid(16)](buf10, primals_10, buf13, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf10 del primals_10 buf14 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(192)](buf7, primals_8, buf14, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf7 del primals_8 return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, buf6, primals_3, primals_4, primals_6, primals_7, primals_9, buf1, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf3, buf5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf8, (16, 12, 4), (48, 1, 12), 0), reinterpret_tensor(buf9, (16, 4, 12), (48, 1, 4), 0), buf14) class FCNet(nn.Module): def __init__(self, in_size, out_size, activate=None, drop=0.0): super(FCNet, self).__init__() self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None) self.drop_value = drop self.drop = nn.Dropout(drop) self.activate = activate.lower() if activate is not None else None if activate == 'relu': self.ac_fn = nn.ReLU() elif activate == 'sigmoid': self.ac_fn = nn.Sigmoid() elif activate == 'tanh': self.ac_fn = nn.Tanh() def forward(self, x): if self.drop_value > 0: x = self.drop(x) x = self.lin(x) if self.activate is not None: x = self.ac_fn(x) return x class BiAttentionNew(nn.Module): def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0 ): super(BiAttentionNew, self).__init__() self.hidden_aug = 3 self.glimpses = glimpses self.lin_v = FCNet(v_features, int(mid_features * self.hidden_aug), activate='relu', drop=drop / 2.5) self.lin_q = FCNet(q_features, int(mid_features * self.hidden_aug), activate='relu', drop=drop / 2.5) self.h_weight = nn.Parameter(torch.Tensor(1, glimpses, 1, int( mid_features * self.hidden_aug)).normal_()) self.h_bias = nn.Parameter(torch.Tensor(1, glimpses, 1, 1).normal_()) self.drop = nn.Dropout(drop) def forward(self, input_0, input_1): primals_9 = self.h_weight primals_10 = self.h_bias primals_5 = self.lin_v.lin.bias primals_3 = self.lin_v.lin.weight_g primals_4 = self.lin_v.lin.weight_v primals_8 = self.lin_q.lin.bias primals_6 = self.lin_q.lin.weight_g primals_7 = self.lin_q.lin.weight_v primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
zhanwenchen/Scene-Graph-Benchmark.pytorch
BiAttention
false
4,666
[ "MIT" ]
0
c86475bcbdaefcc1656a2890194355c2b32aa694
https://github.com/zhanwenchen/Scene-Graph-Benchmark.pytorch/tree/c86475bcbdaefcc1656a2890194355c2b32aa694
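With the test configuration above (v_features = q_features = mid_features = 4, glimpses = 4), BiAttention maps two (batch, len, 4) inputs to a (batch, glimpses, num_obj, que_len) attention map, and the softmax is taken jointly over the num_obj * que_len pairs. A small shape sketch, assuming BiAttention and FCNet from above are in scope:

import torch

torch.manual_seed(0)
att = BiAttention(v_features=4, q_features=4, mid_features=4, glimpses=4)
v, q = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
a = att(v, q)
print(a.shape)                   # torch.Size([4, 4, 4, 4])
print(a.view(4, 4, -1).sum(-1))  # each glimpse sums to ~1.0 over the 16 (object, word) pairs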
RSELayer
import torch import torch.nn as nn import torch.nn.functional as F def hard_sigmoid(x, slope=0.1666667, offset=0.5): return torch.clamp(slope * x + offset, 0.0, 1.0) class SEModule(nn.Module): def __init__(self, in_channels, reduction=4, name=''): super(SEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True) self.conv2 = nn.Conv2d(in_channels=in_channels // reduction, out_channels=in_channels, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, inputs): outputs = self.avg_pool(inputs) outputs = self.conv1(outputs) outputs = F.relu(outputs) outputs = self.conv2(outputs) outputs = hard_sigmoid(outputs, slope=0.2, offset=0.5) return inputs * outputs class RSELayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, shortcut=True): super(RSELayer, self).__init__() self.out_channels = out_channels self.in_conv = nn.Conv2d(in_channels=in_channels, out_channels=self .out_channels, kernel_size=kernel_size, padding=int(kernel_size // 2), bias=False) self.se_block = SEModule(self.out_channels) self.shortcut = shortcut def forward(self, ins): x = self.in_conv(ins) if self.shortcut: out = x + self.se_block(x) else: out = self.se_block(x) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 25 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 25 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 25.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_clamp_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 25 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = 0.2 tmp3 = tmp1 * tmp2 tmp4 = 0.5 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 1.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tmp10 = tmp0 * tmp9 tmp11 = tmp0 + tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf1 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf2, buf0, 
16, 25, XBLOCK=1, num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(4)](buf4, primals_4, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(16)](buf6, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf7 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) triton_poi_fused_add_clamp_mul_3[grid(400)](buf0, buf6, buf7, 400, XBLOCK=256, num_warps=4, num_stages=1) return (buf7, primals_1, primals_2, primals_3, primals_5, buf0, buf2, buf4, buf6) def hard_sigmoid(x, slope=0.1666667, offset=0.5): return torch.clamp(slope * x + offset, 0.0, 1.0) class SEModule(nn.Module): def __init__(self, in_channels, reduction=4, name=''): super(SEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True) self.conv2 = nn.Conv2d(in_channels=in_channels // reduction, out_channels=in_channels, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, inputs): outputs = self.avg_pool(inputs) outputs = self.conv1(outputs) outputs = F.relu(outputs) outputs = self.conv2(outputs) outputs = hard_sigmoid(outputs, slope=0.2, offset=0.5) return inputs * outputs class RSELayerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, shortcut=True): super(RSELayerNew, self).__init__() self.out_channels = out_channels self.in_conv = nn.Conv2d(in_channels=in_channels, out_channels=self .out_channels, kernel_size=kernel_size, padding=int(kernel_size // 2), bias=False) self.se_block = SEModule(self.out_channels) self.shortcut = shortcut def forward(self, input_0): primals_1 = self.in_conv.weight primals_3 = self.se_block.conv1.weight primals_4 = self.se_block.conv1.bias primals_5 = self.se_block.conv2.weight primals_6 = self.se_block.conv2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
verages/PaddleOCR2Pytorch
RSELayer
false
4,667
[ "Apache-2.0" ]
0
201f0d5d6007f49620c49af7d222c3b220eb3e70
https://github.com/verages/PaddleOCR2Pytorch/tree/201f0d5d6007f49620c49af7d222c3b220eb3e70
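For the RSELayer entry above: with shortcut=True the layer returns the conv output plus its SE-gated copy, and the even kernel_size=4 with padding 2 actually grows the 4x4 test input to 5x5, which is why the generated kernels iterate over 25-element maps. A minimal sketch, assuming RSELayer and SEModule from above are in scope:

import torch

torch.manual_seed(0)
layer = RSELayer(in_channels=4, out_channels=4, kernel_size=4)
inp = torch.rand(4, 4, 4, 4)
x = layer.in_conv(inp)  # [4, 4, 5, 5]: 4 + 2*2 - 4 + 1 = 5 per spatial dim
assert torch.allclose(layer(inp), x + layer.se_block(x))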
F_fully_connected
import torch import torch.nn as nn import torch.optim class F_fully_connected(nn.Module): """Fully connected transformation, not reversible, but used below.""" def __init__(self, size_in, size, internal_size=None, dropout=0.0): super().__init__() if not internal_size: internal_size = 2 * size self.d1 = nn.Dropout(p=dropout) self.d2 = nn.Dropout(p=dropout) self.d2b = nn.Dropout(p=dropout) self.fc1 = nn.Linear(size_in, internal_size) self.fc2 = nn.Linear(internal_size, internal_size) self.fc2b = nn.Linear(internal_size, internal_size) self.fc3 = nn.Linear(internal_size, size) self.nl1 = nn.ReLU() self.nl2 = nn.ReLU() self.nl2b = nn.ReLU() def forward(self, x): out = self.nl1(self.d1(self.fc1(x))) out = self.nl2(self.d2(self.fc2(out))) out = self.nl2b(self.d2b(self.fc2b(out))) out = self.fc3(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'size_in': 4, 'size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, 8), (8, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (8, 8), (8, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, primals_2, buf9, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 8), (1, 8), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf3, primals_5, buf8, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 8), (1, 8), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf4 buf7 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf5, primals_7, buf7, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor( buf3, (64, 8), 
(8, 1), 0), reinterpret_tensor(buf5, (64, 8), (8, 1), 0 ), primals_8, buf7, primals_6, buf8, primals_4, buf9 class F_fully_connectedNew(nn.Module): """Fully connected transformation, not reversible, but used below.""" def __init__(self, size_in, size, internal_size=None, dropout=0.0): super().__init__() if not internal_size: internal_size = 2 * size self.d1 = nn.Dropout(p=dropout) self.d2 = nn.Dropout(p=dropout) self.d2b = nn.Dropout(p=dropout) self.fc1 = nn.Linear(size_in, internal_size) self.fc2 = nn.Linear(internal_size, internal_size) self.fc2b = nn.Linear(internal_size, internal_size) self.fc3 = nn.Linear(internal_size, size) self.nl1 = nn.ReLU() self.nl2 = nn.ReLU() self.nl2b = nn.ReLU() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc2b.weight primals_7 = self.fc2b.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
zimmerrol/FrEIA
F_fully_connected
false
4,668
[ "MIT" ]
0
73d01ab8c90e0deb5e242d66405bd168db06dc19
https://github.com/zimmerrol/FrEIA/tree/73d01ab8c90e0deb5e242d66405bd168db06dc19
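A minimal usage sketch for the F_fully_connected entry above, assuming the class is in scope: leaving internal_size as None makes the hidden width default to 2 * size (8 here), which is exactly the (8, 4) / (8, 8) / (4, 8) weight shapes asserted in call().

import torch

net = F_fully_connected(size_in=4, size=4)
print(net.fc1.weight.shape, net.fc2.weight.shape, net.fc3.weight.shape)
# torch.Size([8, 4]) torch.Size([8, 8]) torch.Size([4, 8])
print(net(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 4, 4]) -- Linear acts on the last dim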
C2
import torch import torch.nn as nn from collections import OrderedDict class C2(nn.Module): def __init__(self): super(C2, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(6, 16, kernel_size=(5, 5))), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, img): output = self.c2(img) return output def get_inputs(): return [torch.rand([4, 6, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 16 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x2 = xindex // 900 x5 = xindex x4 = xindex // 14400 x6 = xindex % 14400 tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 14464 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 6, 64, 64), (24576, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 60, 60), (57600, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 16, 60, 60), (57856, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(230400)](buf0, primals_2, buf1, 230400, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 16, 30, 30), (14400, 900, 30, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 30, 30), (14464, 900, 30, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(57600)](buf1, buf2, buf3, 57600, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C2New(nn.Module): def __init__(self): super(C2New, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(6, 16, kernel_size=(5, 5))), ('relu2', 
nn.ReLU()), ('s2', nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c2.c2.weight primals_2 = self.c2.c2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
zjgbz/img_cls
C2
false
4669
[ "MIT" ]
0
513d5ae423d95e008a82a6ffe443db49f8ed9ac2
https://github.com/zjgbz/img_cls/tree/513d5ae423d95e008a82a6ffe443db49f8ed9ac2
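A minimal smoke test for this record (a sketch, not part of the original repo): it assumes the optimised source above is saved as c2_triton.py (a hypothetical file name) and that a CUDA device is available. The (4, 6, 64, 64) input shape is the one asserted inside call(), and the expected output shape follows from Conv2d(6, 16, 5) plus the 2x2 max-pool.

import torch
from c2_triton import C2New  # hypothetical module holding the optimised code above

model = C2New().cuda()
x = torch.rand(4, 6, 64, 64, device='cuda')  # shape asserted by call()
with torch.no_grad():
    out = model(x)
print(out.shape)  # expected: torch.Size([4, 16, 30, 30]) after conv -> ReLU -> 2x2 max-pool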
SubSample
import torch import torch.nn as nn class SubSample(nn.Module): def __init__(self, in_channels, out_channels, types='Pool', stride=[2, 1], sub_norm='nn.LayerNorm', act=None): super().__init__() self.types = types if types == 'Pool': self.avgpool = nn.AvgPool2d(kernel_size=[3, 5], stride=stride, padding=[1, 2]) self.maxpool = nn.MaxPool2d(kernel_size=[3, 5], stride=stride, padding=[1, 2]) self.proj = nn.Linear(in_channels, out_channels) else: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1) self.norm = eval(sub_norm)(out_channels) if act is not None: self.act = act() else: self.act = None def forward(self, x): if self.types == 'Pool': x1 = self.avgpool(x) x2 = self.maxpool(x) x = (x1 + x2) * 0.5 out = self.proj(x.flatten(2).permute(0, 2, 1)) else: x = self.conv(x) out = x.flatten(2).permute(0, 2, 1) out = self.norm(out) if self.act is not None: out = self.act(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_clone_max_pool2d_with_indices_0(in_ptr0, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 8 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex // 4 x1 = xindex % 4 y0 = yindex x5 = xindex y3 = yindex % 4 y4 = yindex // 4 tmp0 = -1 + 2 * x2 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -2 + x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-6 + x1 + 8 * x2 + 16 * y0), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = -1 + x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-5 + x1 + 8 * x2 + 16 * y0), tmp16 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp18 = tmp17 + tmp11 tmp19 = x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-4 + x1 + 8 * x2 + 16 * y0), tmp23 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp25 = tmp24 + tmp18 tmp26 = 1 + x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + (-3 + x1 + 8 * x2 + 16 * y0), tmp30 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 + tmp25 tmp33 = 2 + x1 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp5 & tmp36 tmp38 = tl.load(in_ptr0 + (-2 + x1 + 8 * x2 + 16 * y0), tmp37 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp39 = tmp38 + tmp32 tmp40 = 2 * x2 tmp41 = tmp40 >= tmp1 tmp42 = tmp40 < tmp3 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp9 tmp45 = tl.load(in_ptr0 + (-2 + x1 + 8 * x2 + 16 * y0), tmp44 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp46 = tmp45 + tmp39 tmp47 = tmp43 & tmp15 tmp48 = tl.load(in_ptr0 + (-1 + x1 + 8 * x2 + 16 * y0), tmp47 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp49 = tmp48 + tmp46 tmp50 = tmp43 & tmp22 tmp51 = tl.load(in_ptr0 + (x1 + 8 * x2 + 16 * y0), tmp50 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp52 = tmp51 + tmp49 tmp53 = tmp43 & tmp29 tmp54 = tl.load(in_ptr0 + (1 + x1 + 8 * x2 + 16 * y0), tmp53 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp55 = tmp54 + tmp52 tmp56 = tmp43 & tmp36 tmp57 = tl.load(in_ptr0 + (2 + x1 + 8 * x2 + 16 * y0), tmp56 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp58 = tmp57 + tmp55 tmp59 = 1 + 2 * x2 tmp60 = tmp59 >= tmp1 tmp61 = tmp59 < tmp3 tmp62 = tmp60 & tmp61 tmp63 = tmp62 & tmp9 tmp64 = tl.load(in_ptr0 + (2 + x1 + 8 * x2 + 16 * y0), tmp63 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp65 = tmp64 + tmp58 tmp66 = tmp62 & tmp15 tmp67 = 
tl.load(in_ptr0 + (3 + x1 + 8 * x2 + 16 * y0), tmp66 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp68 = tmp67 + tmp65 tmp69 = tmp62 & tmp22 tmp70 = tl.load(in_ptr0 + (4 + x1 + 8 * x2 + 16 * y0), tmp69 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp71 = tmp70 + tmp68 tmp72 = tmp62 & tmp29 tmp73 = tl.load(in_ptr0 + (5 + x1 + 8 * x2 + 16 * y0), tmp72 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp74 = tmp73 + tmp71 tmp75 = tmp62 & tmp36 tmp76 = tl.load(in_ptr0 + (6 + x1 + 8 * x2 + 16 * y0), tmp75 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp77 = tmp76 + tmp74 tmp78 = 2 + -1 * x1 + -4 * x2 + 2 * (5 * (5 <= 2 + 2 * x2) + (2 + 2 * x2) * (2 + 2 * x2 < 5)) + (5 * (5 <= 2 + 2 * x2) + (2 + 2 * x2) * ( 2 + 2 * x2 < 5)) * (6 * (6 <= 3 + x1) + (3 + x1) * (3 + x1 < 6) ) + -1 * x1 * (5 * (5 <= 2 + 2 * x2) + (2 + 2 * x2) * (2 + 2 * x2 < 5) ) + -2 * x2 * (6 * (6 <= 3 + x1) + (3 + x1) * (3 + x1 < 6) ) + 2 * x1 * x2 + (6 * (6 <= 3 + x1) + (3 + x1) * (3 + x1 < 6)) tmp79 = tmp77 / tmp78 tmp80 = tl.load(in_ptr0 + (-6 + x1 + 8 * x2 + 16 * y0), tmp10 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp81 = tl.load(in_ptr0 + (-5 + x1 + 8 * x2 + 16 * y0), tmp16 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp82 = triton_helpers.maximum(tmp81, tmp80) tmp83 = tl.load(in_ptr0 + (-4 + x1 + 8 * x2 + 16 * y0), tmp23 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp84 = triton_helpers.maximum(tmp83, tmp82) tmp85 = tl.load(in_ptr0 + (-3 + x1 + 8 * x2 + 16 * y0), tmp30 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp86 = triton_helpers.maximum(tmp85, tmp84) tmp87 = tl.load(in_ptr0 + (-2 + x1 + 8 * x2 + 16 * y0), tmp37 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp88 = triton_helpers.maximum(tmp87, tmp86) tmp89 = tl.load(in_ptr0 + (-2 + x1 + 8 * x2 + 16 * y0), tmp44 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp90 = triton_helpers.maximum(tmp89, tmp88) tmp91 = tl.load(in_ptr0 + (-1 + x1 + 8 * x2 + 16 * y0), tmp47 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp92 = triton_helpers.maximum(tmp91, tmp90) tmp93 = tl.load(in_ptr0 + (x1 + 8 * x2 + 16 * y0), tmp50 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp94 = triton_helpers.maximum(tmp93, tmp92) tmp95 = tl.load(in_ptr0 + (1 + x1 + 8 * x2 + 16 * y0), tmp53 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp96 = triton_helpers.maximum(tmp95, tmp94) tmp97 = tl.load(in_ptr0 + (2 + x1 + 8 * x2 + 16 * y0), tmp56 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp98 = triton_helpers.maximum(tmp97, tmp96) tmp99 = tl.load(in_ptr0 + (2 + x1 + 8 * x2 + 16 * y0), tmp63 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp100 = triton_helpers.maximum(tmp99, tmp98) tmp101 = tl.load(in_ptr0 + (3 + x1 + 8 * x2 + 16 * y0), tmp66 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp102 = triton_helpers.maximum(tmp101, tmp100) tmp103 = tl.load(in_ptr0 + (4 + x1 + 8 * x2 + 16 * y0), tmp69 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp104 = triton_helpers.maximum(tmp103, tmp102) tmp105 = tl.load(in_ptr0 + (5 + x1 + 8 * x2 + 16 * y0), tmp72 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp106 = triton_helpers.maximum(tmp105, tmp104) tmp107 = tl.load(in_ptr0 + (6 + x1 + 8 * x2 + 16 * y0), tmp75 & xmask & ymask, 
eviction_policy='evict_last', other=float('-inf')) tmp108 = triton_helpers.maximum(tmp107, tmp106) tmp109 = tmp79 + tmp108 tmp110 = 0.5 tmp111 = tmp109 * tmp110 tl.store(out_ptr2 + (y3 + 4 * x5 + 32 * y4), tmp111, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp7 = tmp4 + tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 + tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_clone_max_pool2d_with_indices_0[grid(16, 8) ](primals_1, buf2, 16, 8, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf3 = empty_strided_cuda((32, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (32, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 8, 1), (8, 1, 32), torch.float32) buf5 = empty_strided_cuda((4, 8, 1), (8, 1, 32), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(32)](buf3, primals_3, buf4, buf5, 32, XBLOCK=32, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 8, 
4), (32, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(128)](buf3, primals_3, buf4, buf5, primals_4, primals_5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del buf5 del primals_5 return buf6, primals_3, primals_4, reinterpret_tensor(buf2, (32, 4), (4, 1), 0), buf3 class SubSampleNew(nn.Module): def __init__(self, in_channels, out_channels, types='Pool', stride=[2, 1], sub_norm='nn.LayerNorm', act=None): super().__init__() self.types = types if types == 'Pool': self.avgpool = nn.AvgPool2d(kernel_size=[3, 5], stride=stride, padding=[1, 2]) self.maxpool = nn.MaxPool2d(kernel_size=[3, 5], stride=stride, padding=[1, 2]) self.proj = nn.Linear(in_channels, out_channels) else: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1) self.norm = eval(sub_norm)(out_channels) if act is not None: self.act = act() else: self.act = None def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_4 = self.norm.weight primals_5 = self.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
verages/PaddleOCR2Pytorch
SubSample
false
4670
[ "Apache-2.0" ]
0
201f0d5d6007f49620c49af7d222c3b220eb3e70
https://github.com/verages/PaddleOCR2Pytorch/tree/201f0d5d6007f49620c49af7d222c3b220eb3e70
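A minimal parity check for this record (a sketch under stated assumptions): the two sources above are assumed to be saved as subsample_ref.py and subsample_triton.py (hypothetical file names) and run on a CUDA device; SubSample, SubSampleNew, get_inputs and get_init_inputs all come from the record itself.

import torch
from subsample_ref import SubSample, get_inputs, get_init_inputs  # hypothetical file names
from subsample_triton import SubSampleNew

args, kwargs = get_init_inputs()
ref = SubSample(*args, **kwargs).cuda().eval()
opt = SubSampleNew(*args, **kwargs).cuda().eval()
opt.load_state_dict(ref.state_dict())  # both classes register the same submodules, so weights transfer directly

x = get_inputs()[0].cuda()
with torch.no_grad():
    print(torch.allclose(ref(x), opt(x), atol=1e-5))  # expected: True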
Critic
import torch from torch import nn import torch.nn.functional as F class Critic(nn.Module): """Critic model Parameters: args (object): Parameter class """ def __init__(self, state_dim, action_dim): super(Critic, self).__init__() l1 = 400 l2 = 300 self.q1f1 = nn.Linear(state_dim + action_dim, l1) self.q1ln1 = nn.LayerNorm(l1) self.q1f2 = nn.Linear(l1, l2) self.q1ln2 = nn.LayerNorm(l2) self.q1out = nn.Linear(l2, 1) self.q2f1 = nn.Linear(state_dim + action_dim, l1) self.q2ln1 = nn.LayerNorm(l1) self.q2f2 = nn.Linear(l1, l2) self.q2ln2 = nn.LayerNorm(l2) self.q2out = nn.Linear(l2, 1) self.vf1 = nn.Linear(state_dim, l1) self.vln1 = nn.LayerNorm(l1) self.vf2 = nn.Linear(l1, l2) self.vln2 = nn.LayerNorm(l2) self.vout = nn.Linear(l2, 1) def forward(self, obs, action): """Method to forward propagate through the critic's graph Parameters: input (tensor): states input (tensor): actions Returns: Q1 (tensor): Qval 1 Q2 (tensor): Qval 2 V (tensor): Value """ state = torch.cat([obs, action], 1) q1 = F.elu(self.q1f1(state)) q1 = self.q1ln1(q1) q1 = F.elu(self.q1f2(q1)) q1 = self.q1ln2(q1) q1 = self.q1out(q1) q2 = F.elu(self.q2f1(state)) q2 = self.q2ln1(q2) q2 = F.elu(self.q2f2(q2)) q2 = self.q2ln2(q2) q2 = self.q2out(q2) v = F.elu(self.vf1(obs)) v = self.vln1(v) v = F.elu(self.vf2(v)) v = self.vln2(v) v = self.vout(v) return q1, q2, v def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_per_fused_elu_native_layer_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 400 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 400 * x0), rmask, other=0.0) tmp31 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tl.where(rmask, tmp8, 0) tmp11 = tl.broadcast_to(tmp8, [RBLOCK]) tmp13 = tl.where(rmask, tmp11, 0) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp15 = tl.full([1], 400, tl.int32) tmp16 = tmp15.to(tl.float32) tmp17 = tmp14 / tmp16 tmp18 = tmp8 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tmp24 = 400.0 tmp25 = tmp23 / tmp24 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tmp29 = tmp7 - tmp17 tmp30 = tmp29 * tmp28 tmp32 = tmp30 * tmp31 tmp34 = tmp32 + tmp33 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp28, None) tl.store(out_ptr1 + (r1 + 400 * x0), tmp34, rmask) tl.store(out_ptr0 + x0, tmp17, None) @triton.jit def triton_per_fused_elu_native_layer_norm_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 300 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 300 * x0), rmask, other=0.0) tmp31 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tl.broadcast_to(tmp7, 
[RBLOCK]) tl.where(rmask, tmp8, 0) tmp11 = tl.broadcast_to(tmp8, [RBLOCK]) tmp13 = tl.where(rmask, tmp11, 0) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp15 = tl.full([1], 300, tl.int32) tmp16 = tmp15.to(tl.float32) tmp17 = tmp14 / tmp16 tmp18 = tmp8 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tmp24 = 300.0 tmp25 = tmp23 / tmp24 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tmp29 = tmp7 - tmp17 tmp30 = tmp29 * tmp28 tmp32 = tmp30 * tmp31 tmp34 = tmp32 + tmp33 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp28, None) tl.store(out_ptr1 + (r1 + 300 * x0), tmp34, rmask) tl.store(out_ptr0 + x0, tmp17, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400,), (1,)) assert_size_stride(primals_5, (400,), (1,)) assert_size_stride(primals_6, (400,), (1,)) assert_size_stride(primals_7, (300, 400), (400, 1)) assert_size_stride(primals_8, (300,), (1,)) assert_size_stride(primals_9, (300,), (1,)) assert_size_stride(primals_10, (300,), (1,)) assert_size_stride(primals_11, (1, 300), (300, 1)) assert_size_stride(primals_12, (1,), (1,)) assert_size_stride(primals_13, (400, 8), (8, 1)) assert_size_stride(primals_14, (400,), (1,)) assert_size_stride(primals_15, (400,), (1,)) assert_size_stride(primals_16, (400,), (1,)) assert_size_stride(primals_17, (300, 400), (400, 1)) assert_size_stride(primals_18, (300,), (1,)) assert_size_stride(primals_19, (300,), (1,)) assert_size_stride(primals_20, (300,), (1,)) assert_size_stride(primals_21, (1, 300), (300, 1)) assert_size_stride(primals_22, (1,), (1,)) assert_size_stride(primals_23, (400, 4), (4, 1)) assert_size_stride(primals_24, (400,), (1,)) assert_size_stride(primals_25, (400,), (1,)) assert_size_stride(primals_26, (400,), (1,)) assert_size_stride(primals_27, (300, 400), (400, 1)) assert_size_stride(primals_28, (300,), (1,)) assert_size_stride(primals_29, (300,), (1,)) assert_size_stride(primals_30, (300,), (1,)) assert_size_stride(primals_31, (1, 300), (300, 1)) assert_size_stride(primals_32, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 400), (400, 1), torch.float32) triton_per_fused_elu_native_layer_norm_1[grid(4)](buf5, buf1, primals_5, primals_6, buf2, buf6, 4, 400, num_warps=4, num_stages=1 ) del primals_6 buf7 = empty_strided_cuda((4, 300), (300, 1), 
torch.float32) extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7, (400, 300), (1, 400), 0), alpha=1, beta=1, out=buf7) del primals_8 buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf11 = reinterpret_tensor(buf9, (4, 1), (1, 1), 0) del buf9 buf12 = empty_strided_cuda((4, 300), (300, 1), torch.float32) triton_per_fused_elu_native_layer_norm_2[grid(4)](buf11, buf7, primals_9, primals_10, buf8, buf12, 4, 300, num_warps=4, num_stages=1) del primals_10 buf14 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_12, buf12, reinterpret_tensor( primals_11, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf14) del primals_12 buf15 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.addmm(primals_14, buf0, reinterpret_tensor( primals_13, (8, 400), (1, 8), 0), alpha=1, beta=1, out=buf15) del primals_13 del primals_14 buf16 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf17 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf19 = reinterpret_tensor(buf17, (4, 1), (1, 1), 0) del buf17 buf20 = empty_strided_cuda((4, 400), (400, 1), torch.float32) triton_per_fused_elu_native_layer_norm_1[grid(4)](buf19, buf15, primals_15, primals_16, buf16, buf20, 4, 400, num_warps=4, num_stages=1) del primals_16 buf21 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.addmm(primals_18, buf20, reinterpret_tensor( primals_17, (400, 300), (1, 400), 0), alpha=1, beta=1, out=buf21) del primals_18 buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf23 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf25 = reinterpret_tensor(buf23, (4, 1), (1, 1), 0) del buf23 buf26 = empty_strided_cuda((4, 300), (300, 1), torch.float32) triton_per_fused_elu_native_layer_norm_2[grid(4)](buf25, buf21, primals_19, primals_20, buf22, buf26, 4, 300, num_warps=4, num_stages=1) del primals_20 buf28 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_22, buf26, reinterpret_tensor( primals_21, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf28) del primals_22 buf29 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.addmm(primals_24, primals_1, reinterpret_tensor( primals_23, (4, 400), (1, 4), 0), alpha=1, beta=1, out=buf29) del primals_23 del primals_24 buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf31 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf33 = reinterpret_tensor(buf31, (4, 1), (1, 1), 0) del buf31 buf34 = empty_strided_cuda((4, 400), (400, 1), torch.float32) triton_per_fused_elu_native_layer_norm_1[grid(4)](buf33, buf29, primals_25, primals_26, buf30, buf34, 4, 400, num_warps=4, num_stages=1) del primals_26 buf35 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.addmm(primals_28, buf34, reinterpret_tensor( primals_27, (400, 300), (1, 400), 0), alpha=1, beta=1, out=buf35) del primals_28 buf36 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf37 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf39 = reinterpret_tensor(buf37, (4, 1), (1, 1), 0) del buf37 buf40 = empty_strided_cuda((4, 300), (300, 1), torch.float32) triton_per_fused_elu_native_layer_norm_2[grid(4)](buf39, buf35, primals_29, primals_30, buf36, buf40, 4, 300, num_warps=4, num_stages=1) del primals_30 buf42 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_32, buf40, reinterpret_tensor( primals_31, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf42) del primals_32 return (buf14, 
buf28, buf42, primals_1, primals_5, primals_9, primals_15, primals_19, primals_25, primals_29, buf0, buf1, buf2, buf5, buf6, buf7, buf8, buf11, buf12, buf15, buf16, buf19, buf20, buf21, buf22, buf25, buf26, buf29, buf30, buf33, buf34, buf35, buf36, buf39, buf40, primals_31, primals_27, primals_21, primals_17, primals_11, primals_7) class CriticNew(nn.Module): """Critic model Parameters: args (object): Parameter class """ def __init__(self, state_dim, action_dim): super(CriticNew, self).__init__() l1 = 400 l2 = 300 self.q1f1 = nn.Linear(state_dim + action_dim, l1) self.q1ln1 = nn.LayerNorm(l1) self.q1f2 = nn.Linear(l1, l2) self.q1ln2 = nn.LayerNorm(l2) self.q1out = nn.Linear(l2, 1) self.q2f1 = nn.Linear(state_dim + action_dim, l1) self.q2ln1 = nn.LayerNorm(l1) self.q2f2 = nn.Linear(l1, l2) self.q2ln2 = nn.LayerNorm(l2) self.q2out = nn.Linear(l2, 1) self.vf1 = nn.Linear(state_dim, l1) self.vln1 = nn.LayerNorm(l1) self.vf2 = nn.Linear(l1, l2) self.vln2 = nn.LayerNorm(l2) self.vout = nn.Linear(l2, 1) def forward(self, input_0, input_1): primals_3 = self.q1f1.weight primals_4 = self.q1f1.bias primals_5 = self.q1ln1.weight primals_6 = self.q1ln1.bias primals_7 = self.q1f2.weight primals_8 = self.q1f2.bias primals_9 = self.q1ln2.weight primals_10 = self.q1ln2.bias primals_11 = self.q1out.weight primals_12 = self.q1out.bias primals_13 = self.q2f1.weight primals_14 = self.q2f1.bias primals_15 = self.q2ln1.weight primals_16 = self.q2ln1.bias primals_17 = self.q2f2.weight primals_18 = self.q2f2.bias primals_19 = self.q2ln2.weight primals_20 = self.q2ln2.bias primals_21 = self.q2out.weight primals_22 = self.q2out.bias primals_23 = self.vf1.weight primals_24 = self.vf1.bias primals_25 = self.vln1.weight primals_26 = self.vln1.bias primals_27 = self.vf2.weight primals_28 = self.vf2.bias primals_29 = self.vln2.weight primals_30 = self.vln2.bias primals_31 = self.vout.weight primals_32 = self.vout.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32]) return output[0], output[1], output[2]
zhan0903/cerl
Critic
false
4671
[ "Apache-2.0" ]
0
6fb8aca9cb78b72947237edf2b9ed8362bd43829
https://github.com/zhan0903/cerl/tree/6fb8aca9cb78b72947237edf2b9ed8362bd43829
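A minimal parity check for this record (a sketch, assuming the two sources above are saved as critic_ref.py and critic_triton.py, hypothetical file names, with a CUDA device available). Critic returns the tuple (q1, q2, v), so each element is compared separately.

import torch
from critic_ref import Critic, get_inputs, get_init_inputs  # hypothetical file names
from critic_triton import CriticNew

args, kwargs = get_init_inputs()
ref = Critic(*args, **kwargs).cuda().eval()
opt = CriticNew(*args, **kwargs).cuda().eval()
opt.load_state_dict(ref.state_dict())  # identical parameter names in both classes

obs, action = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    for expected, actual in zip(ref(obs, action), opt(obs, action)):
        print(torch.allclose(expected, actual, atol=1e-4))  # q1, q2 and v should each match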
F_conv
import torch import warnings import torch.nn as nn import torch.nn.functional as F import torch.optim class F_conv(nn.Module): """ResNet transformation, not itself reversible, just used below""" def __init__(self, in_channels, channels, channels_hidden=None, stride= None, kernel_size=3, leaky_slope=0.1, batch_norm=False): super().__init__() if stride: warnings.warn( "Stride doesn't do anything, the argument should be removed", DeprecationWarning) if not channels_hidden: channels_hidden = channels pad = kernel_size // 2 self.leaky_slope = leaky_slope self.conv1 = nn.Conv2d(in_channels, channels_hidden, kernel_size= kernel_size, padding=pad, bias=not batch_norm) self.conv2 = nn.Conv2d(channels_hidden, channels_hidden, kernel_size=kernel_size, padding=pad, bias=not batch_norm) self.conv3 = nn.Conv2d(channels_hidden, channels, kernel_size= kernel_size, padding=pad, bias=not batch_norm) if batch_norm: self.bn1 = nn.BatchNorm2d(channels_hidden) self.bn1.weight.data.fill_(1) self.bn2 = nn.BatchNorm2d(channels_hidden) self.bn2.weight.data.fill_(1) self.bn3 = nn.BatchNorm2d(channels) self.bn3.weight.data.fill_(1) self.batch_norm = batch_norm def forward(self, x): out = self.conv1(x) if self.batch_norm: out = self.bn1(out) out = F.leaky_relu(out, self.leaky_slope) out = self.conv2(out) if self.batch_norm: out = self.bn2(out) out = F.leaky_relu(out, self.leaky_slope) out = self.conv3(out) if self.batch_norm: out = self.bn3(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import warnings import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf5 = buf0 del buf0 triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf3, primals_5, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_1[grid(256)](buf7, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf4, buf5) class F_convNew(nn.Module): """ResNet transformation, not itself reversible, just used below""" def __init__(self, in_channels, channels, channels_hidden=None, stride= None, kernel_size=3, 
leaky_slope=0.1, batch_norm=False): super().__init__() if stride: warnings.warn( "Stride doesn't do anything, the argument should be removed", DeprecationWarning) if not channels_hidden: channels_hidden = channels pad = kernel_size // 2 self.leaky_slope = leaky_slope self.conv1 = nn.Conv2d(in_channels, channels_hidden, kernel_size= kernel_size, padding=pad, bias=not batch_norm) self.conv2 = nn.Conv2d(channels_hidden, channels_hidden, kernel_size=kernel_size, padding=pad, bias=not batch_norm) self.conv3 = nn.Conv2d(channels_hidden, channels, kernel_size= kernel_size, padding=pad, bias=not batch_norm) if batch_norm: self.bn1 = nn.BatchNorm2d(channels_hidden) self.bn1.weight.data.fill_(1) self.bn2 = nn.BatchNorm2d(channels_hidden) self.bn2.weight.data.fill_(1) self.bn3 = nn.BatchNorm2d(channels) self.bn3.weight.data.fill_(1) self.batch_norm = batch_norm def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
zimmerrol/FrEIA
F_conv
false
4672
[ "MIT" ]
0
73d01ab8c90e0deb5e242d66405bd168db06dc19
https://github.com/zimmerrol/FrEIA/tree/73d01ab8c90e0deb5e242d66405bd168db06dc19
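A minimal parity check for this record (a sketch, assuming the sources above are saved as f_conv_ref.py and f_conv_triton.py, hypothetical file names, on a CUDA machine). With the default batch_norm=False both classes hold only conv1/conv2/conv3, so the state dict transfers one-to-one.

import torch
from f_conv_ref import F_conv, get_inputs, get_init_inputs  # hypothetical file names
from f_conv_triton import F_convNew

args, kwargs = get_init_inputs()
ref = F_conv(*args, **kwargs).cuda().eval()
opt = F_convNew(*args, **kwargs).cuda().eval()
opt.load_state_dict(ref.state_dict())

x = get_inputs()[0].cuda()
with torch.no_grad():
    print(torch.allclose(ref(x), opt(x), atol=1e-5))  # expected: True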
LR_PAD
import torch import torch.nn as nn def lr_pad(x, padding=1): """ Pad left/right-most to each other instead of zero padding """ return torch.cat([x[..., -padding:], x, x[..., :padding]], dim=3) class LR_PAD(nn.Module): """ Pad left/right-most to each other instead of zero padding """ def __init__(self, padding=1): super(LR_PAD, self).__init__() self.padding = padding def forward(self, x): return lr_pad(x, self.padding) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 * x1 + (-1 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 6, tl.int64) tmp14 = tl.load(in_ptr0 + 4 * x1, tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, def lr_pad(x, padding=1): """ Pad left/right-most to each other instead of zero padding """ return torch.cat([x[..., -padding:], x, x[..., :padding]], dim=3) class LR_PADNew(nn.Module): """ Pad left/right-most to each other instead of zero padding """ def __init__(self, padding=1): super(LR_PADNew, self).__init__() self.padding = padding def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zokin/HorizonNet
LR_PAD
false
4673
[ "MIT" ]
0
a93a76ec7fdc76a5ba023adaed869e34f7f3cea4
https://github.com/zokin/HorizonNet/tree/a93a76ec7fdc76a5ba023adaed869e34f7f3cea4
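A minimal parity check for this record (a sketch, assuming lr_pad_ref.py and lr_pad_triton.py as hypothetical file names and a CUDA device). LR_PAD has no parameters, and the fused cat kernel only copies values, so the outputs should match exactly.

import torch
from lr_pad_ref import LR_PAD, get_inputs  # hypothetical file names
from lr_pad_triton import LR_PADNew

x = get_inputs()[0].cuda()
ref_out = LR_PAD()(x)
opt_out = LR_PADNew()(x)
print(torch.equal(ref_out, opt_out))  # expected: True (pure copies, no arithmetic)
print(opt_out.shape)  # torch.Size([4, 4, 4, 6]): width padded from 4 to 6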
MLPLayer
import torch from torch import nn class MLPLayer(nn.Module): def __init__(self, input_size, output_size, non_linearity=torch.sigmoid): super().__init__() self.lin1 = nn.Linear(input_size, input_size // 2) self.lin2 = nn.Linear(input_size // 2, output_size) self.non_lin = non_linearity def forward(self, x): out = self.non_lin(self.lin1(x)) return self.non_lin(self.lin2(out)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(128)](buf1, primals_2, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_sigmoid_1[grid(256)](buf3, primals_5, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, primals_4 class MLPLayerNew(nn.Module): def __init__(self, input_size, output_size, non_linearity=torch.sigmoid): super().__init__() self.lin1 = nn.Linear(input_size, input_size // 2) self.lin2 = nn.Linear(input_size // 2, output_size) self.non_lin = non_linearity def forward(self, input_0): primals_1 = self.lin1.weight primals_2 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
zoranmedic/LCR-design
MLPLayer
false
4674
[ "MIT" ]
0
b722e4e9d00e8aaae36dd51ddc8131477ee805fd
https://github.com/zoranmedic/LCR-design/tree/b722e4e9d00e8aaae36dd51ddc8131477ee805fd
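A minimal parity check for this record (a sketch, assuming mlp_layer_ref.py and mlp_layer_triton.py as hypothetical file names and a CUDA device). The mm calls plus the fused bias-and-sigmoid kernels should reproduce the eager Linear -> sigmoid -> Linear -> sigmoid pipeline.

import torch
from mlp_layer_ref import MLPLayer, get_inputs, get_init_inputs  # hypothetical file names
from mlp_layer_triton import MLPLayerNew

args, kwargs = get_init_inputs()
ref = MLPLayer(*args, **kwargs).cuda()
opt = MLPLayerNew(*args, **kwargs).cuda()
opt.load_state_dict(ref.state_dict())  # lin1/lin2 exist in both classes

x = get_inputs()[0].cuda()
with torch.no_grad():
    print(torch.allclose(ref(x), opt(x), atol=1e-6))  # expected: True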
MultiheadAttention
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn.init import xavier_uniform_ class MultiheadAttention(nn.Module): """Allows the model to jointly attend to information from different representation subspaces. See reference: Attention Is All You Need .. math:: ext{MultiHead}(Q, K, V) = ext{Concat}(head_1,\\dots,head_h)W^O ext{where} head_i = ext{Attention}(QW_i^Q, KW_i^K, VW_i^V) Args: embed_dim: total dimension of the model num_heads: parallel attention layers, or heads """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.out_proj = Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() self.conv1 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv2 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv3 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) def _reset_parameters(self): xavier_uniform_(self.out_proj.weight) def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, attn_mask=None): """ Inputs of forward function query: [target length, batch size, embed dim] key: [sequence length, batch size, embed dim] value: [sequence length, batch size, embed dim] key_padding_mask: if True, mask padding based on batch size incremental_state: if provided, previous time steps are cashed need_weights: output attn_output_weights static_kv: key and value are static Outputs of forward function attn_output: [target length, batch size, embed dim] attn_output_weights: [batch size, target length, sequence length] """ q_shape = query.shape src_shape = key.shape q = self._in_proj_q(query) k = self._in_proj_k(key) v = self._in_proj_v(value) q *= self.scaling q = torch.reshape(q, (q_shape[0], q_shape[1], self.num_heads, self. head_dim)) q = q.permute(1, 2, 0, 3) k = torch.reshape(k, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) k = k.permute(1, 2, 0, 3) v = torch.reshape(v, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) v = v.permute(1, 2, 0, 3) if key_padding_mask is not None: assert key_padding_mask.shape[0] == q_shape[1] assert key_padding_mask.shape[1] == src_shape[0] attn_output_weights = torch.matmul(q, k.permute(0, 1, 3, 2)) if attn_mask is not None: attn_mask = torch.unsqueeze(torch.unsqueeze(attn_mask, 0), 0) attn_output_weights += attn_mask if key_padding_mask is not None: attn_output_weights = torch.reshape(attn_output_weights, [ q_shape[1], self.num_heads, q_shape[0], src_shape[0]]) key = torch.unsqueeze(torch.unsqueeze(key_padding_mask, 1), 2) key = key.type(torch.float32) y = torch.full(size=key.shape, fill_value=float('-Inf'), dtype= torch.float32) y = torch.where(key == 0.0, key, y) attn_output_weights += y attn_output_weights = F.softmax(attn_output_weights.type(torch. float32), dim=-1, dtype=torch.float32 if attn_output_weights. 
dtype == torch.float16 else attn_output_weights.dtype) attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_output_weights, v) attn_output = torch.reshape(attn_output.permute(2, 0, 1, 3), [ q_shape[0], q_shape[1], self.embed_dim]) attn_output = self.out_proj(attn_output) return attn_output def _in_proj_q(self, query): query = query.permute(1, 2, 0) query = torch.unsqueeze(query, dim=2) res = self.conv1(query) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_k(self, key): key = key.permute(1, 2, 0) key = torch.unsqueeze(key, dim=2) res = self.conv2(key) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_v(self, value): value = value.permute(1, 2, 0) value = torch.unsqueeze(value, dim=2) res = self.conv3(value) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn import Linear from torch.nn.init import xavier_uniform_ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y0 = yindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x3 + 16 * y0), tmp4, xmask & ymask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 4), (16, 4, 4, 1)) buf2 = buf0 del buf0 triton_poi_fused_convolution_0[grid(16, 4)](primals_2, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 4), (16, 4, 4, 1)) buf4 = buf2 del buf2 triton_poi_fused_convolution_0[grid(16, 4)](primals_7, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 4), (16, 4, 4, 1)) buf6 = buf3 del buf3 triton_poi_fused_convolution_1[grid(64)](buf6, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf7 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 triton_poi_fused_mul_2[grid(4, 16)](buf1, primals_4, buf7, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__softmax_4[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf11 = buf5 del buf5 triton_poi_fused_convolution_1[grid(64)](buf11, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf12 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 16)](buf12, buf13, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0) del buf12 extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0) del buf14 triton_poi_fused_add_6[grid(64)](buf15, primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf16 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) triton_poi_fused_convolution_0[grid(16, 4)](buf7, buf16, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf7 return buf15, primals_3, primals_5, primals_8, reinterpret_tensor(primals_1 , (4, 4, 1, 4), (4, 1, 64, 16), 0), reinterpret_tensor(primals_2, ( 4, 4, 1, 4), (4, 1, 64, 16), 0), reinterpret_tensor(primals_7, (4, 4, 1, 4), (4, 1, 64, 16), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf11, (16, 1, 4), ( 4, 4, 1), 0), buf16, reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 4), 0) class MultiheadAttentionNew(nn.Module): """Allows the model to jointly attend to information from different representation subspaces. See reference: Attention Is All You Need .. 
math:: ext{MultiHead}(Q, K, V) = ext{Concat}(head_1,\\dots,head_h)W^O ext{where} head_i = ext{Attention}(QW_i^Q, KW_i^K, VW_i^V) Args: embed_dim: total dimension of the model num_heads: parallel attention layers, or heads """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super(MultiheadAttentionNew, self).__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.out_proj = Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() self.conv1 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv2 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv3 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) def _reset_parameters(self): xavier_uniform_(self.out_proj.weight) def _in_proj_q(self, query): query = query.permute(1, 2, 0) query = torch.unsqueeze(query, dim=2) res = self.conv1(query) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_k(self, key): key = key.permute(1, 2, 0) key = torch.unsqueeze(key, dim=2) res = self.conv2(key) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_v(self, value): value = value.permute(1, 2, 0) value = torch.unsqueeze(value, dim=2) res = self.conv3(value) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def forward(self, input_0, input_1, input_2): primals_10 = self.out_proj.weight primals_4 = self.out_proj.bias primals_3 = self.conv1.weight primals_6 = self.conv1.bias primals_5 = self.conv2.weight primals_9 = self.conv2.bias primals_8 = self.conv3.weight primals_11 = self.conv3.bias primals_1 = input_0 primals_2 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
verages/PaddleOCR2Pytorch
MultiheadAttention
false
4675
[ "Apache-2.0" ]
0
201f0d5d6007f49620c49af7d222c3b220eb3e70
https://github.com/verages/PaddleOCR2Pytorch/tree/201f0d5d6007f49620c49af7d222c3b220eb3e70
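A minimal parity check for this record (a sketch, assuming mha_ref.py and mha_triton.py as hypothetical file names and a CUDA device). The optimised forward takes no key_padding_mask or attn_mask, so the comparison is only meaningful for the unmasked case exercised by get_inputs.

import torch
from mha_ref import MultiheadAttention, get_inputs, get_init_inputs  # hypothetical file names
from mha_triton import MultiheadAttentionNew

args, kwargs = get_init_inputs()
ref = MultiheadAttention(*args, **kwargs).cuda().eval()
opt = MultiheadAttentionNew(*args, **kwargs).cuda().eval()
opt.load_state_dict(ref.state_dict())  # out_proj and conv1-3 exist in both classes

q, k, v = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    print(torch.allclose(ref(q, k, v), opt(q, k, v), atol=1e-5))  # expected: True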
ReadUnit
import torch from torch import nn import torch.nn.functional as F from torch.nn.init import xavier_uniform_ def linear(in_dim, out_dim, bias=True): lin = nn.Linear(in_dim, out_dim, bias=bias) xavier_uniform_(lin.weight) if bias: lin.bias.data.zero_() return lin class ReadUnit(nn.Module): def __init__(self, dim): super().__init__() self.mem = linear(dim, dim) self.concat = linear(dim * 2, dim) self.attn = linear(dim, 1) def forward(self, memories, k, c): """ :param memories: :param k: knowledge :param c: control :return: r_i """ m_prev = memories[-1] I = self.mem(m_prev).unsqueeze(2) * k I = self.concat(torch.cat([I, k], 1).permute(0, 2, 1)) attn = I * c[-1].unsqueeze(1) attn = self.attn(attn).squeeze(2) attn = F.softmax(attn, 1).unsqueeze(1) read = (attn * k).sum(2) return read def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn.init import xavier_uniform_ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x1 = xindex // 8 % 4 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr2 + (x1 + 4 * x0 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (x1 + 4 * (-4 + x0) + 16 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x4, tmp16, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (12 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 
tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(128)](buf0, primals_3, primals_4, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0) del buf2 triton_poi_fused_add_mul_1[grid(64)](buf3, primals_6, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf5 = reinterpret_tensor(buf0, (16, 1), (1, 1), 0) del buf0 extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_9 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_3[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused_mul_sum_4[grid(16)](buf7, primals_4, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf7 return buf8, primals_4, reinterpret_tensor(primals_1, (4, 4), (4, 1), 48 ), reinterpret_tensor(buf1, (16, 8), (8, 1), 0), reinterpret_tensor( primals_7, (4, 1), (1, 1), 12), reinterpret_tensor(buf3, (16, 4), ( 4, 1), 0), buf5, primals_8, primals_5 def linear(in_dim, out_dim, bias=True): lin = nn.Linear(in_dim, out_dim, bias=bias) xavier_uniform_(lin.weight) if bias: lin.bias.data.zero_() return lin 
class ReadUnitNew(nn.Module): def __init__(self, dim): super().__init__() self.mem = linear(dim, dim) self.concat = linear(dim * 2, dim) self.attn = linear(dim, 1) def forward(self, input_0, input_1, input_2): primals_2 = self.mem.weight primals_3 = self.mem.bias primals_5 = self.concat.weight primals_6 = self.concat.bias primals_8 = self.attn.weight primals_9 = self.attn.bias primals_1 = input_0 primals_4 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
zorache/mac-network-pytorch-gqa
ReadUnit
false
4,676
[ "MIT" ]
0
5de0a906410af0596f7b5dc159ce7db82bd37418
https://github.com/zorache/mac-network-pytorch-gqa/tree/5de0a906410af0596f7b5dc159ce7db82bd37418
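One way to write the ReadUnit forward pass above (notation mine, read off the code): for memory state m_prev, control state c and knowledge items k_i,

I_i = (W_m m_{\text{prev}}) \odot k_i, \qquad I'_i = W_c\,[I_i ; k_i], \qquad a = \operatorname{softmax}_i\!\big(w_a^{\top}(c \odot I'_i)\big), \qquad r = \sum_i a_i\, k_i

where W_m, W_c and w_a are the mem, concat and attn linear layers.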
CriterionKD
import torch import torch.nn as nn from torch.nn import functional as F import torch._utils import torch.optim class CriterionKD(nn.Module): """ knowledge distillation loss """ def __init__(self, upsample=False, temperature=4): super(CriterionKD, self).__init__() self.upsample = upsample self.temperature = temperature self.criterion_kd = torch.nn.KLDivLoss() def forward(self, pred, soft): soft.detach() h, w = soft.size(2), soft.size(3) if self.upsample: scale_pred = F.interpolate(input=pred, size=(h * 2, w * 2), mode='bilinear', align_corners=True) scale_soft = F.interpolate(input=soft, size=(h * 2, w * 2), mode='bilinear', align_corners=True) else: scale_pred = pred scale_soft = soft loss = self.criterion_kd(F.log_softmax(scale_pred / self. temperature, dim=1), F.softmax(scale_soft / self.temperature, dim=1)) return loss * self.temperature * self.temperature def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch._utils import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x3, tmp17, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') 
tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 256.0 tmp37 = tmp35 / tmp36 tmp38 = 4.0 tmp39 = tmp37 * tmp38 tmp40 = tmp39 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp40, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg1_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2[grid(1)]( buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class CriterionKDNew(nn.Module): """ knowledge distillation loss """ def __init__(self, upsample=False, temperature=4): super(CriterionKDNew, self).__init__() self.upsample = upsample self.temperature = temperature self.criterion_kd = torch.nn.KLDivLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
yubin1219/Semantic-Seg
CriterionKD
false
4,677
[ "BSD-2-Clause" ]
0
c40bd43d3d7e44bc995b8d041736580dec084251
https://github.com/yubin1219/Semantic-Seg/tree/c40bd43d3d7e44bc995b8d041736580dec084251
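A minimal sketch of how a row like this one can be sanity-checked, assuming the CriterionKD and CriterionKDNew definitions above are in scope and a CUDA device is available (the generated call() allocates CUDA buffers, so CPU tensors will not work):

import torch

# Sample inputs with the same shape as get_inputs() above, placed on the GPU.
pred = torch.rand(4, 4, 4, 4, device='cuda')
soft = torch.rand(4, 4, 4, 4, device='cuda')

# Eager module vs. the Inductor/Triton-compiled module from this row.
eager = CriterionKD()
fused = CriterionKDNew()

# Both should return the same scalar loss up to floating-point tolerance.
torch.testing.assert_close(eager(pred, soft), fused(pred, soft))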
SiaLoss
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data.distributed class SiaLoss(nn.Module): """ Contrastive loss function. Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=2.0): super(SiaLoss, self).__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2) loss = torch.mean(label * torch.pow(euclidean_distance, 2) + (1 - label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp24, xmask) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp3 = tmp0 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp0 tmp6 = 2.0 tmp7 = tmp6 - tmp1 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp5 * tmp10 tmp12 = tmp3 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class SiaLossNew(nn.Module): """ Contrastive loss function. 
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=2.0): super(SiaLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
zwzhang121/OpenUnReID
SiaLoss
false
4,678
[ "Apache-2.0" ]
0
4f399efca3d560c608fb4c9c2ed43f522b17596a
https://github.com/zwzhang121/OpenUnReID/tree/4f399efca3d560c608fb4c9c2ed43f522b17596a
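Written out, the loss computed by SiaLoss.forward above is the contrastive loss of the cited Hadsell-Chopra-LeCun paper:

\mathcal{L} = \operatorname{mean}\!\big(y\, d^2 + (1 - y)\, \max(m - d,\ 0)^2\big), \qquad d = \lVert x_1 - x_2 \rVert_2

with margin m = 2.0 by default and d the per-pair Euclidean distance from F.pairwise_distance.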
F_fully_convolutional
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class F_fully_convolutional(nn.Module): def __init__(self, in_channels, out_channels, internal_size=256, kernel_size=3, leaky_slope=0.02): super().__init__() pad = kernel_size // 2 self.leaky_slope = leaky_slope self.conv1 = nn.Conv2d(in_channels, internal_size, kernel_size= kernel_size, padding=pad) self.conv2 = nn.Conv2d(in_channels + internal_size, internal_size, kernel_size=kernel_size, padding=pad) self.conv3 = nn.Conv2d(in_channels + 2 * internal_size, out_channels, kernel_size=1, padding=0) def forward(self, x): x1 = F.leaky_relu(self.conv1(x), self.leaky_slope) x2 = F.leaky_relu(self.conv2(torch.cat([x, x1], 1)), self.leaky_slope) return self.conv3(torch.cat([x, x1, x2], 1)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 260 y1 = yindex // 260 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 260 * x2 + 2340 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 260 x1 = xindex // 260 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 260, tl.int64) tmp9 = tl.load(in_ptr1 + (256 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1) tmp10 = tl.load(in_ptr2 + (256 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr3 + (-4 + x0), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = 0.02 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp6, tmp15, 
tmp16) tmp18 = tl.where(tmp4, tmp5, tmp17) tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 33024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 516 x1 = xindex // 516 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 260, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (256 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1) tmp11 = tl.load(in_ptr2 + (256 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.load(in_ptr3 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = 0.02 tmp15 = tmp13 * tmp14 tmp16 = tl.where(tmp10, tmp13, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp9, tmp16, tmp17) tmp19 = tmp0 >= tmp7 tl.full([1], 516, tl.int64) tmp22 = tl.load(in_ptr4 + (256 * x1 + (-260 + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1) tmp23 = tl.load(in_ptr5 + (256 * x1 + (-260 + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr6 + (-260 + x0), tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tmp25 * tmp14 tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp19, tmp27, tmp28) tmp30 = tl.where(tmp9, tmp18, tmp29) tmp31 = tl.where(tmp4, tmp5, tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 260, 3, 3), (2340, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (4, 516, 1, 1), (516, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32 ) get_raw_stream(0) triton_poi_fused_0[grid(1024, 9)](primals_1, buf0, 1024, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((256, 260, 3, 3), (2340, 1, 780, 260), torch.float32) triton_poi_fused_2[grid(66560, 9)](primals_4, buf2, 66560, 9, 
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf4 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool) triton_poi_fused_convolution_leaky_relu_3[grid(16384)](buf3, primals_2, buf4, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 260, 4, 4), (4160, 1, 1040, 260), torch.float32) triton_poi_fused_cat_4[grid(16640)](buf1, buf4, buf3, primals_2, buf5, 16640, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf7 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool) triton_poi_fused_convolution_leaky_relu_3[grid(16384)](buf6, primals_5, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 516, 4, 4), (8256, 1, 2064, 516), torch.float32) triton_poi_fused_cat_5[grid(33024)](buf1, buf4, buf3, primals_2, buf7, buf6, primals_5, buf8, 33024, XBLOCK=512, num_warps=4, num_stages=1) del buf3 del buf6 del primals_2 del primals_5 buf9 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 1, 16, 4)) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_6[grid(16, 16)](buf9, primals_7, buf10, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf9 del primals_7 return buf10, buf0, buf1, buf2, primals_6, buf4, buf5, buf7, buf8 class F_fully_convolutionalNew(nn.Module): def __init__(self, in_channels, out_channels, internal_size=256, kernel_size=3, leaky_slope=0.02): super().__init__() pad = kernel_size // 2 self.leaky_slope = leaky_slope self.conv1 = nn.Conv2d(in_channels, internal_size, kernel_size= kernel_size, padding=pad) self.conv2 = nn.Conv2d(in_channels + internal_size, internal_size, kernel_size=kernel_size, padding=pad) self.conv3 = nn.Conv2d(in_channels + 2 * internal_size, out_channels, kernel_size=1, padding=0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
zimmerrol/FrEIA
F_fully_convolutional
false
4,679
[ "MIT" ]
0
73d01ab8c90e0deb5e242d66405bd168db06dc19
https://github.com/zimmerrol/FrEIA/tree/73d01ab8c90e0deb5e242d66405bd168db06dc19
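The channel counts in the compiled call() above follow directly from the dense-style concatenations in the eager forward: with in_channels = 4 and internal_size = 256, conv2 sees 4 + 256 = 260 input channels and conv3 sees 4 + 2 * 256 = 516, which matches the (256, 260, 3, 3) and (4, 516, 1, 1) weight shapes asserted in call().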
C3
import torch import torch.nn as nn from collections import OrderedDict class C3(nn.Module): def __init__(self): super(C3, self).__init__() self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(16, 120, kernel_size=(5, 5))), ('relu3', nn.ReLU())])) def forward(self, img): output = self.c3(img) return output def get_inputs(): return [torch.rand([4, 16, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 1920 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 400 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 480 xnumel = 3600 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 120 y1 = yindex // 120 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 120 * x2 + 432000 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 3600 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 120 * x2 + 432000 * y1), tmp6, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (120, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_2, (120,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((120, 16, 5, 5), (400, 1, 80, 16), torch. 
float32) get_raw_stream(0) triton_poi_fused_0[grid(1920, 25)](primals_1, buf0, 1920, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16), torch.float32) triton_poi_fused_1[grid(64, 4096)](primals_3, buf1, 64, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 120, 60, 60), (432000, 1, 7200, 120)) buf3 = empty_strided_cuda((4, 120, 60, 60), (432000, 3600, 60, 1), torch.float32) buf4 = empty_strided_cuda((4, 120, 60, 60), (432000, 1, 7200, 120), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(480, 3600) ](buf2, primals_2, buf3, buf4, 480, 3600, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf2 del primals_2 return buf3, buf0, buf1, buf4 class C3New(nn.Module): def __init__(self): super(C3New, self).__init__() self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(16, 120, kernel_size=(5, 5))), ('relu3', nn.ReLU())])) def forward(self, input_0): primals_1 = self.c3.c3.weight primals_2 = self.c3.c3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
zjgbz/img_cls
C3
false
4,680
[ "MIT" ]
0
513d5ae423d95e008a82a6ffe443db49f8ed9ac2
https://github.com/zjgbz/img_cls/tree/513d5ae423d95e008a82a6ffe443db49f8ed9ac2
AngleSimpleLinear
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models from torch.nn import Parameter from torch.nn.parameter import Parameter import torch.onnx import torch.nn class AngleSimpleLinear(nn.Module): """Computes cos of angles between input vectors and weights vectors""" def __init__(self, in_features, out_features): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(in_features, out_features)) self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0) def forward(self, x): cos_theta = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0)) return cos_theta.clamp(-1.0 + 1e-07, 1.0 - 1e-07), def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torchvision import models as models from torch.nn import Parameter from torch.nn.parameter import Parameter import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -0.9999999 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 0.9999999 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = tmp0 >= tmp1 tmp6 = tmp0 <= tmp3 tmp7 = tmp5 & tmp6 tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp7, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, 
buf1, out=buf2) buf3 = buf1 del buf1 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_clamp_ge_le_logical_and_2[grid(16)](buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 return buf3, primals_2, buf4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0) class AngleSimpleLinearNew(nn.Module): """Computes cos of angles between input vectors and weights vectors""" def __init__(self, in_features, out_features): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(in_features, out_features)) self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ygnn123/training_extensions
AngleSimpleLinear
false
4,681
[ "Apache-2.0" ]
0
c3aeba9359b0d4e0ef9c054de777d3ec081a9892
https://github.com/ygnn123/training_extensions/tree/c3aeba9359b0d4e0ef9c054de777d3ec081a9892
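In formula form, the layer above computes, for input row x_i and weight column w_j,

\cos\theta_{ij} = \frac{x_i^{\top} w_j}{\lVert x_i \rVert_2 \, \lVert w_j \rVert_2}

i.e. F.normalize(x, dim=1) @ F.normalize(weight, dim=0), with the result clamped to [-1 + 10^{-7}, 1 - 10^{-7}] (the -0.9999999 / 0.9999999 constants visible in triton_poi_fused_clamp_ge_le_logical_and_2).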
TKipfGCN
import math import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.nn import Parameter class BaseModel(nn.Module): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" pass @classmethod def build_model_from_args(cls, args): """Build a new model instance.""" raise NotImplementedError( 'Models must implement the build_model_from_args method') class GraphConvolution(nn.Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.normal_(-stdv, stdv) if self.bias is not None: self.bias.data.normal_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class TKipfGCN(BaseModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--num-features', type=int) parser.add_argument('--num-classes', type=int) parser.add_argument('--hidden-size', type=int, default=64) parser.add_argument('--dropout', type=float, default=0.5) @classmethod def build_model_from_args(cls, args): return cls(args.num_features, args.hidden_size, args.num_classes, args.dropout) def __init__(self, nfeat, nhid, nclass, dropout): super(TKipfGCN, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, x, adj): x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc2(x, adj) return F.log_softmax(x, dim=-1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.utils.data import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_3, buf3, alpha=1, beta=1, out=buf4) del primals_6 buf5 = buf3 del buf3 triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return buf6, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class BaseModel(nn.Module): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" pass @classmethod def build_model_from_args(cls, args): """Build a new model instance.""" raise NotImplementedError( 'Models must implement the build_model_from_args method') class GraphConvolution(nn.Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.normal_(-stdv, stdv) if self.bias is not None: self.bias.data.normal_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class TKipfGCNNew(BaseModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--num-features', 
type=int) parser.add_argument('--num-classes', type=int) parser.add_argument('--hidden-size', type=int, default=64) parser.add_argument('--dropout', type=float, default=0.5) @classmethod def build_model_from_args(cls, args): return cls(args.num_features, args.hidden_size, args.num_classes, args.dropout) def __init__(self, nfeat, nhid, nclass, dropout): super(TKipfGCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_2 = self.gc2.weight primals_6 = self.gc2.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
zxhhh97/cogdl
TKipfGCN
false
4,682
[ "MIT" ]
0
de21c78d9bbbf0c6cafbc72ff241cda35693ec37
https://github.com/zxhhh97/cogdl/tree/de21c78d9bbbf0c6cafbc72ff241cda35693ec37
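The two-layer forward above is the Kipf-Welling GCN referenced in the GraphConvolution docstring; writing the adjacency passed in as \hat{A} (any normalisation of adj happens outside this module), it reads

Z = \operatorname{log\_softmax}\!\big(\hat{A}\,\operatorname{ReLU}(\hat{A} X W^{(1)} + b^{(1)})\, W^{(2)} + b^{(2)}\big)

with dropout applied between the two GraphConvolution layers during training.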
FCDiscriminator_Local
import torch import torch.nn as nn class FCDiscriminator_Local(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminator_Local, self).__init__() self.conv1 = nn.Conv2d(num_classes + 2048, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 4, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear') def forward(self, x): x = self.conv1(x) x = self.leaky_relu(x) x = self.conv2(x) x = self.leaky_relu(x) x = self.conv3(x) x = self.leaky_relu(x) x = self.classifier(x) x = self.up_sample(x) return x def get_inputs(): return [torch.rand([4, 2052, 64, 64])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused__to_copy_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.03125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_4(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.03125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.03125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) 
tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 128 % 128 x0 = xindex % 128 x2 = xindex // 16384 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + 0) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp13 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tmp12 = tmp9 + tmp11 tmp14 = tmp13 + tmp1 tmp15 = tmp13 < 0 tmp16 = tl.where(tmp15, tmp14, tmp13) tmp17 = tl.load(in_ptr2 + (tmp16 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tmp18 = tmp17 + tmp11 tmp19 = tmp18 - tmp12 tmp21 = tmp19 * tmp20 tmp22 = tmp12 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp23 < 0 tmp26 = tl.where(tmp25, tmp24, tmp23) tmp27 = tl.load(in_ptr2 + (tmp8 + 4 * tmp26 + 16 * x2), None, eviction_policy='evict_last') tmp28 = tmp27 + tmp11 tmp29 = tl.load(in_ptr2 + (tmp16 + 4 * tmp26 + 16 * x2), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp11 tmp31 = tmp30 - tmp28 tmp32 = tmp31 * tmp20 tmp33 = tmp28 + tmp32 tmp34 = tmp33 - tmp22 tmp36 = tmp34 * tmp35 tmp37 = tmp22 + tmp36 tl.store(in_out_ptr0 + x3, tmp37, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 2052, 4, 4), (32832, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 2052, 64, 64), (8404992, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (1, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(131072)](buf3, primals_5, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, 
primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(65536)](buf5, primals_7, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1)) buf7 = empty_strided_cuda((128, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_3[grid(128)](buf7, 128, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((128, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_4[grid(128)](buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((128,), (1,), torch.int64) triton_poi_fused__to_copy_3[grid(128)](buf9, 128, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((128,), (1,), torch.int64) triton_poi_fused_add_clamp_4[grid(128)](buf10, 128, XBLOCK=128, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(128)](buf11, 128, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((128, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(128)](buf13, 128, XBLOCK=128, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((4, 1, 128, 128), (16384, 65536, 128, 1), torch.float32) buf15 = reinterpret_tensor(buf14, (4, 1, 128, 128), (16384, 16384, 128, 1), 0) del buf14 triton_poi_fused__unsafe_index_add_convolution_mul_sub_6[grid(65536)]( buf15, buf7, buf9, buf6, primals_9, buf10, buf11, buf8, buf13, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_9 return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf7, buf8, buf9, buf10, buf11, buf13) class FCDiscriminator_LocalNew(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminator_LocalNew, self).__init__() self.conv1 = nn.Conv2d(num_classes + 2048, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1) self.classifier = nn.Conv2d(ndf * 4, 1, kernel_size=4, stride=2, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear') def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.classifier.weight primals_9 = self.classifier.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
shiyutang/CLAN
FCDiscriminator_Local
false
4,683
[ "MIT" ]
0
920bd7cb592ba79ee5058f8cd662d20eda50457e
https://github.com/shiyutang/CLAN/tree/920bd7cb592ba79ee5058f8cd662d20eda50457e
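A minimal eager-mode sketch of the bias-add + LeakyReLU fusion that the triton_poi_fused_convolution_leaky_relu_* kernels in the row above implement (the where(x > 0, x, 0.2 * x) pattern applied after each convolution). The helper name fused_bias_leaky_relu_ref is illustrative only, not part of the source repo:

import torch
import torch.nn.functional as F

def fused_bias_leaky_relu_ref(x, bias, slope=0.2):
    # Same arithmetic as the Triton kernels: add the per-channel bias,
    # then keep x where it is positive and scale it by `slope` elsewhere.
    y = x + bias.view(1, -1, 1, 1)
    return torch.where(y > 0, y, slope * y)

x = torch.randn(4, 64, 32, 32)
b = torch.randn(64)
ref = F.leaky_relu(x + b.view(1, -1, 1, 1), negative_slope=0.2)
assert torch.allclose(fused_bias_leaky_relu_ref(x, b), ref)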
VAE
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class VAE(nn.Module): def __init__(self, z_dim): super().__init__() self.z_dim = z_dim self.fc1 = nn.Linear(784, 500) self.fc21 = nn.Linear(500, self.z_dim) self.fc22 = nn.Linear(500, self.z_dim) self.fc3 = nn.Linear(self.z_dim, 500) self.fc4 = nn.Linear(500, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) mu = self.fc21(h1) logvar = self.fc22(h1) return mu, logvar def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.rand_like(std) return mu + eps * std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, x): x = x.view(-1, 784) mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {'z_dim': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (500, 784), (784, 1)) assert_size_stride(primals_3, (500,), (1,)) assert_size_stride(primals_4, (4, 500), (500, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 500), (500, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (500, 4), (4, 1)) assert_size_stride(primals_9, (500,), (1,)) assert_size_stride(primals_10, (784, 500), (500, 1)) assert_size_stride(primals_11, (784,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 500), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(2000)](buf1, primals_3, 2000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (500, 4), (1, 500), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (500, 4), (1, 500), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = torch.ops.aten.rand.default([4, 4], dtype=torch.float32, 
device=device(type='cuda', index=0), pin_memory=False) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_exp_mul_1[grid(16)](buf2, buf5, buf3, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (4, 500), (1, 4), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_0[grid(2000)](buf8, primals_9, 2000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (500, 784), (1, 500), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_sigmoid_2[grid(3136)](buf10, primals_11, 3136, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8, buf10, primals_10, primals_8, primals_6, primals_4) class VAENew(nn.Module): def __init__(self, z_dim): super().__init__() self.z_dim = z_dim self.fc1 = nn.Linear(784, 500) self.fc21 = nn.Linear(500, self.z_dim) self.fc22 = nn.Linear(500, self.z_dim) self.fc3 = nn.Linear(self.z_dim, 500) self.fc4 = nn.Linear(500, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) mu = self.fc21(h1) logvar = self.fc22(h1) return mu, logvar def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.rand_like(std) return mu + eps * std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc21.weight primals_5 = self.fc21.bias primals_6 = self.fc22.weight primals_7 = self.fc22.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_10 = self.fc4.weight primals_11 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1], output[2]
zyzisyz/torch-practice
VAE
false
4,684
[ "Apache-2.0" ]
0
92f2b7f1a01bbabd1a2cf2a4dd9099a0eeb9cf00
https://github.com/zyzisyz/torch-practice/tree/92f2b7f1a01bbabd1a2cf2a4dd9099a0eeb9cf00
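A sketch of the elementwise fusion performed by triton_poi_fused_add_exp_mul_1 in the VAE row above: exp(0.5 * logvar), the multiply by eps, and the add of mu are folded into one kernel. Note that the source module draws eps with torch.rand_like (uniform), which is why the compiled graph calls torch.ops.aten.rand.default rather than a normal sampler. The function name reparam_ref is illustrative only:

import torch

def reparam_ref(mu, logvar, eps):
    # mu + eps * exp(0.5 * logvar), exactly the arithmetic fused in the kernel
    return mu + eps * torch.exp(0.5 * logvar)

mu = torch.randn(4, 4)
logvar = torch.randn(4, 4)
eps = torch.rand_like(mu)  # uniform noise, matching the source module
z = reparam_ref(mu, logvar, eps)
assert z.shape == (4, 4)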
Greedy
import torch import torch.nn as nn from matplotlib.font_manager import * class Greedy(nn.Module): def __init__(self): super().__init__() def forward(self, log_p): return torch.argmax(log_p, dim=1).long() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from matplotlib.font_manager import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x2, tmp46, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class GreedyNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zifeiyu0531/TSP_DRL_PtrNet
Greedy
false
4,685
[ "MIT" ]
0
c62fab73347556173d301c1561edf927e6fbe1d7
https://github.com/zifeiyu0531/TSP_DRL_PtrNet/tree/c62fab73347556173d301c1561edf927e6fbe1d7
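A quick CPU reference for what triton_poi_fused_argmax_0 in the Greedy row above computes: an argmax over dim=1 of a (4, 4, 4, 4) input, producing int64 indices of shape (4, 4, 4). This only mirrors the eager path, not the compiled kernel itself:

import torch

log_p = torch.rand(4, 4, 4, 4)
# Eager path of Greedy.forward: index of the largest entry along dim=1.
idx = torch.argmax(log_p, dim=1).long()
assert idx.shape == (4, 4, 4) and idx.dtype == torch.int64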
Categorical
import torch import torch.nn as nn from matplotlib.font_manager import * class Categorical(nn.Module): def __init__(self): super().__init__() def forward(self, log_p): return torch.multinomial(log_p.exp(), 1).long().squeeze(1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from matplotlib.font_manager import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = torch.ops.aten.multinomial.default(buf0, 1) del buf0 buf2 = buf1 del buf1 return reinterpret_tensor(buf2, (4,), (1,), 0), class CategoricalNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zifeiyu0531/TSP_DRL_PtrNet
Categorical
false
4,686
[ "MIT" ]
0
c62fab73347556173d301c1561edf927e6fbe1d7
https://github.com/zifeiyu0531/TSP_DRL_PtrNet/tree/c62fab73347556173d301c1561edf927e6fbe1d7
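For the Categorical row above, only the exp is lowered to a Triton kernel; the sampling stays on torch.ops.aten.multinomial.default and the (4, 1) result is reinterpreted as a (4,) tensor. A minimal eager reference, with an arbitrary way of producing valid log-probabilities:

import torch

log_p = torch.log_softmax(torch.rand(4, 4), dim=1)  # any valid log-probabilities
probs = log_p.exp()                                  # the part handled by the Triton kernel
sample = torch.multinomial(probs, 1).long().squeeze(1)
assert sample.shape == (4,)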
ScaledDotProductAttention
import math import torch from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn class ScaledDotProductAttention(nn.Module): def __init__(self, dropout_ratio=0): super().__init__() self.dropout = nn.Dropout(dropout_ratio) def forward(self, query, keys, values, mask=None): attn = torch.matmul(query, keys.transpose(-2, -1)) attn /= math.sqrt(query.shape[-1]) if mask is None: mask = attn.new_ones(attn.shape) if mask.dim() < attn.dim(): mask = mask.unsqueeze(-2) mask = self.dropout(mask) attn = attn.masked_fill(mask == 0, -1000.0) attn = F.softmax(attn, dim=-1) output = torch.matmul(attn, values) return output, attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_eq_masked_fill_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tl.full([1], False, tl.int1) tmp4 = -1000.0 tmp5 = tl.where(tmp3, tmp4, tmp2) tmp7 = tmp6 * tmp1 tmp8 = tl.where(tmp3, tmp4, tmp7) tmp10 = tmp9 * tmp1 tmp11 = tl.where(tmp3, tmp4, tmp10) tmp12 = triton_helpers.maximum(tmp8, tmp11) tmp14 = tmp13 * tmp1 tmp15 = tl.where(tmp3, tmp4, tmp14) tmp16 = triton_helpers.maximum(tmp12, tmp15) tmp18 = tmp17 * tmp1 tmp19 = tl.where(tmp3, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp16, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_eq_masked_fill_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 
1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2 class ScaledDotProductAttentionNew(nn.Module): def __init__(self, dropout_ratio=0): super().__init__() self.dropout = nn.Dropout(dropout_ratio) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
ygnn123/training_extensions
ScaledDotProductAttention
false
4,687
[ "Apache-2.0" ]
0
c3aeba9359b0d4e0ef9c054de777d3ec081a9892
https://github.com/ygnn123/training_extensions/tree/c3aeba9359b0d4e0ef9c054de777d3ec081a9892
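The triton_poi_fused__softmax_eq_masked_fill_0 kernel above hard-codes the default-mask path of ScaledDotProductAttention: the 1/sqrt(d) factor becomes the constant 0.5 (d = 4 in get_inputs), and because the auto-generated mask is all ones with dropout disabled, the masked_fill(mask == 0, -1000.0) branch folds to a constant-false predicate (tl.full([1], False, tl.int1)). A CPU sketch of that path, assuming no mask and no dropout:

import math
import torch
import torch.nn.functional as F

q = torch.rand(4, 4, 4, 4)
k = torch.rand(4, 4, 4, 4)
v = torch.rand(4, 4, 4, 4)

attn = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.shape[-1])  # 0.5 for d=4
attn = F.softmax(attn, dim=-1)
out = torch.matmul(attn, v)
assert out.shape == (4, 4, 4, 4)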
SageConv
from torch.nn import Module import torch import torch.nn as nn from torch.nn.modules.module import Module class SageConv(Module): """ Simple Graphsage layer """ def __init__(self, in_features, out_features, bias=False): super(SageConv, self).__init__() self.proj = nn.Linear(in_features * 2, out_features, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.proj.weight) if self.proj.bias is not None: nn.init.constant_(self.proj.bias, 0.0) def forward(self, features, adj): """ Args: adj: can be sparse or dense matrix. """ if not isinstance(adj, torch.sparse.FloatTensor): if len(adj.shape) == 3: neigh_feature = torch.bmm(adj, features) / (adj.sum(dim=1). reshape((adj.shape[0], adj.shape[1], -1)) + 1) else: neigh_feature = torch.mm(adj, features) / (adj.sum(dim=1). reshape(adj.shape[0], -1) + 1) else: neigh_feature = torch.spmm(adj, features) / (adj.to_dense().sum (dim=1).reshape(adj.shape[0], -1) + 1) data = torch.cat([features, neigh_feature], dim=-1) combined = self.proj(data) return combined def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn as nn from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr2 + 4 * x1, tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tl.load(in_ptr2 + (1 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = tl.load(in_ptr2 + (2 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp12 + tmp13 tmp15 = tl.load(in_ptr2 + (3 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = 1.0 tmp18 = tmp16 + tmp17 tmp19 = tmp9 / tmp18 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp6, tmp19, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_2, buf0, primals_1, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf2 = buf0 del buf0 extern_kernels.mm(buf1, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf2) del primals_3 return buf2, buf1 class SageConvNew(Module): """ Simple Graphsage layer """ def __init__(self, in_features, out_features, bias=False): super(SageConvNew, self).__init__() self.proj = nn.Linear(in_features * 2, out_features, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.proj.weight) if self.proj.bias is not None: nn.init.constant_(self.proj.bias, 0.0) def forward(self, input_0, input_1): primals_3 = self.proj.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
yutaoming/Rare-Category-Detection
SageConv
false
4,688
[ "MIT" ]
0
76cf023dff44eef3ecc17f0ebf2b11a08cd63a73
https://github.com/yutaoming/Rare-Category-Detection/tree/76cf023dff44eef3ecc17f0ebf2b11a08cd63a73
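In the SageConv row above, triton_poi_fused_cat_0 builds the concatenated [features, neigh_feature] matrix directly, dividing the aggregated neighbour features by row_sum + 1 on the fly. A dense-adjacency eager sketch of the same computation (weight initialisation omitted; variable names illustrative):

import torch
import torch.nn as nn

features = torch.rand(4, 4)
adj = torch.rand(4, 4)
proj = nn.Linear(8, 4, bias=False)  # in_features * 2 -> out_features

neigh = torch.mm(adj, features) / (adj.sum(dim=1).reshape(4, -1) + 1)
combined = proj(torch.cat([features, neigh], dim=-1))
assert combined.shape == (4, 4)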
LogitKLDivLoss
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn class LogitKLDivLoss(nn.Module): """Kullback–Leibler divergence loss. Inputs predicted and ground truth logits. Args: T (float): Softmax temperature. """ def __init__(self, T=1): super().__init__() self.T = T def forward(self, p_logits, q_logits, **kwargs): log_p = F.log_softmax(p_logits / self.T, dim=1) q = F.softmax(q_logits / self.T, dim=1) return F.kl_div(log_p, q, reduction='batchmean') * self.T ** 2 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 
'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 0.25 tmp37 = tmp35 * tmp36 tmp38 = 1.0 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1) ](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class LogitKLDivLossNew(nn.Module): """Kullback–Leibler divergence loss. Inputs predicted and ground truth logits. Args: T (float): Softmax temperature. """ def __init__(self, T=1): super().__init__() self.T = T def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ygnn123/training_extensions
LogitKLDivLoss
false
4,689
[ "Apache-2.0" ]
0
c3aeba9359b0d4e0ef9c054de777d3ec081a9892
https://github.com/ygnn123/training_extensions/tree/c3aeba9359b0d4e0ef9c054de777d3ec081a9892
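In the reduction kernel of the LogitKLDivLoss row above, the constant 0.25 is 1/batch_size (the 'batchmean' reduction over a batch of 4) and the trailing multiply by 1.0 is the T**2 factor at the default temperature. A CPU reference for the same value, with T left as a parameter (the name kl_ref is illustrative):

import torch
import torch.nn.functional as F

def kl_ref(p_logits, q_logits, T=1.0):
    log_p = F.log_softmax(p_logits / T, dim=1)
    q = F.softmax(q_logits / T, dim=1)
    return F.kl_div(log_p, q, reduction='batchmean') * T ** 2

p = torch.rand(4, 4, 4, 4)
q = torch.rand(4, 4, 4, 4)
loss = kl_ref(p, q)
assert loss.ndim == 0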
TransformerEncoderLayer
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn.init import xavier_uniform_ from torch.nn import Dropout from torch.nn import LayerNorm class MultiheadAttention(nn.Module): """Allows the model to jointly attend to information from different representation subspaces. See reference: Attention Is All You Need .. math:: ext{MultiHead}(Q, K, V) = ext{Concat}(head_1,\\dots,head_h)W^O ext{where} head_i = ext{Attention}(QW_i^Q, KW_i^K, VW_i^V) Args: embed_dim: total dimension of the model num_heads: parallel attention layers, or heads """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.out_proj = Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() self.conv1 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv2 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv3 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) def _reset_parameters(self): xavier_uniform_(self.out_proj.weight) def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, attn_mask=None): """ Inputs of forward function query: [target length, batch size, embed dim] key: [sequence length, batch size, embed dim] value: [sequence length, batch size, embed dim] key_padding_mask: if True, mask padding based on batch size incremental_state: if provided, previous time steps are cashed need_weights: output attn_output_weights static_kv: key and value are static Outputs of forward function attn_output: [target length, batch size, embed dim] attn_output_weights: [batch size, target length, sequence length] """ q_shape = query.shape src_shape = key.shape q = self._in_proj_q(query) k = self._in_proj_k(key) v = self._in_proj_v(value) q *= self.scaling q = torch.reshape(q, (q_shape[0], q_shape[1], self.num_heads, self. head_dim)) q = q.permute(1, 2, 0, 3) k = torch.reshape(k, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) k = k.permute(1, 2, 0, 3) v = torch.reshape(v, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) v = v.permute(1, 2, 0, 3) if key_padding_mask is not None: assert key_padding_mask.shape[0] == q_shape[1] assert key_padding_mask.shape[1] == src_shape[0] attn_output_weights = torch.matmul(q, k.permute(0, 1, 3, 2)) if attn_mask is not None: attn_mask = torch.unsqueeze(torch.unsqueeze(attn_mask, 0), 0) attn_output_weights += attn_mask if key_padding_mask is not None: attn_output_weights = torch.reshape(attn_output_weights, [ q_shape[1], self.num_heads, q_shape[0], src_shape[0]]) key = torch.unsqueeze(torch.unsqueeze(key_padding_mask, 1), 2) key = key.type(torch.float32) y = torch.full(size=key.shape, fill_value=float('-Inf'), dtype= torch.float32) y = torch.where(key == 0.0, key, y) attn_output_weights += y attn_output_weights = F.softmax(attn_output_weights.type(torch. float32), dim=-1, dtype=torch.float32 if attn_output_weights. 
dtype == torch.float16 else attn_output_weights.dtype)
        attn_output_weights = F.dropout(attn_output_weights, p=self.dropout,
            training=self.training)
        attn_output = torch.matmul(attn_output_weights, v)
        attn_output = torch.reshape(attn_output.permute(2, 0, 1, 3), [
            q_shape[0], q_shape[1], self.embed_dim])
        attn_output = self.out_proj(attn_output)
        return attn_output

    def _in_proj_q(self, query):
        query = query.permute(1, 2, 0)
        query = torch.unsqueeze(query, dim=2)
        res = self.conv1(query)
        res = torch.squeeze(res, dim=2)
        res = res.permute(2, 0, 1)
        return res

    def _in_proj_k(self, key):
        key = key.permute(1, 2, 0)
        key = torch.unsqueeze(key, dim=2)
        res = self.conv2(key)
        res = torch.squeeze(res, dim=2)
        res = res.permute(2, 0, 1)
        return res

    def _in_proj_v(self, value):
        value = value.permute(1, 2, 0)
        value = torch.unsqueeze(value, dim=2)
        res = self.conv3(value)
        res = torch.squeeze(res, dim=2)
        res = res.permute(2, 0, 1)
        return res


class TransformerEncoderLayer(nn.Module):
    """TransformerEncoderLayer is made up of self-attn and feedforward network.
    This standard encoder layer is based on the paper "Attention Is All You Need".
    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,
    Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all
    you need. In Advances in Neural Information Processing Systems, pages 6000-6010.
    Users may modify or implement in a different way during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048,
        attention_dropout_rate=0.0, residual_dropout_rate=0.1):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=
            attention_dropout_rate)
        self.conv1 = nn.Conv2d(in_channels=d_model, out_channels=
            dim_feedforward, kernel_size=(1, 1))
        self.conv2 = nn.Conv2d(in_channels=dim_feedforward, out_channels=
            d_model, kernel_size=(1, 1))
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout1 = Dropout(residual_dropout_rate)
        self.dropout2 = Dropout(residual_dropout_rate)

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        """Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer (required).
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
        """
        src2 = self.self_attn(src, src, src, attn_mask=src_mask,
            key_padding_mask=src_key_padding_mask)
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src = src.permute(1, 2, 0)
        src = torch.unsqueeze(src, 2)
        src2 = self.conv2(F.relu(self.conv1(src)))
        src2 = torch.squeeze(src2, 2)
        src2 = src2.permute(2, 0, 1)
        src = torch.squeeze(src, 2)
        src = src.permute(2, 0, 1)
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'nhead': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn.init import xavier_uniform_ from torch.nn import Dropout from torch.nn import LayerNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) tl.store(out_ptr1 + x3, tmp0, xmask) tl.store(out_ptr2 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x5 = xindex y4 = yindex x3 = xindex // 4 y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x5 + 16 * y4), xmask & ymask) tmp1 = tl.load(in_ptr0 + (4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (y0 + 4 * x5 + 64 * y1), tmp8, xmask & ymask) @triton.jit def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 64 * (x0 // 4) + x0 % 4), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_unsqueeze_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1), tmp13, xmask) @triton.jit def triton_poi_fused_transpose_14(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: 
tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (2048, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_13, (2048,), (1,)) assert_size_stride(primals_14, (4, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) buf4 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](primals_1, buf0, buf2, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 4), (16, 1, 16, 4)) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 4), (16, 1, 16, 4)) buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 4), (16, 1, 16, 4)) buf6 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf4 triton_poi_fused_convolution_1[grid(16, 4)](buf3, primals_5, buf6, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf7 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 triton_poi_fused_mul_2[grid(64)](buf1, primals_3, buf7, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 1, 16, 4), 0) del buf8 triton_poi_fused__softmax_4[grid(16, 16)](buf9, buf10, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 
1), 0) del buf1 triton_poi_fused_convolution_1[grid(16, 4)](buf5, primals_7, buf11, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf12 = reinterpret_tensor(buf9, (16, 4, 4), (1, 64, 16), 0) del buf9 triton_poi_fused_bmm_5[grid(256)](buf10, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0) del buf5 extern_kernels.bmm(buf12, reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf13) del buf12 buf14 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0) del buf2 triton_poi_fused_clone_6[grid(4, 16)](buf13, buf14, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0) del buf13 extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15) buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf15, primals_9, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_1, buf15, primals_9, buf16, buf17, primals_10, primals_11, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf19 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) triton_poi_fused_unsqueeze_9[grid(64)](buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 2048, 1, 4), (8192, 1, 8192, 2048)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_10[grid(32768)](buf21, primals_13, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf22 = extern_kernels.convolution(buf21, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 4, 1, 4), (16, 1, 16, 4)) buf23 = buf22 del buf22 triton_poi_fused_convolution_11[grid(64)](buf23, primals_15, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_15 buf24 = reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 16), 0) del buf17 buf25 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0) del buf16 triton_poi_fused_add_native_layer_norm_12[grid(16)](buf19, buf23, buf24, buf25, 16, XBLOCK=16, num_warps=1, num_stages=1) buf26 = buf18 del buf18 triton_poi_fused_add_native_layer_norm_13[grid(64)](buf19, buf23, buf24, buf25, primals_16, primals_17, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf24 del buf25 del primals_17 buf27 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) triton_poi_fused_transpose_14[grid(16, 4)](buf7, buf27, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf7 return (buf26, primals_1, primals_2, primals_4, primals_6, primals_9, primals_10, primals_12, primals_14, primals_16, buf10, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, buf19, buf21, buf23, primals_8, reinterpret_tensor(buf11, (16, 1, 4), (4, 4, 1), 0), buf27, reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 4), 0)) class MultiheadAttention(nn.Module): """Allows the model to jointly attend to information from different representation subspaces. See reference: Attention Is All You Need .. 
math:: ext{MultiHead}(Q, K, V) = ext{Concat}(head_1,\\dots,head_h)W^O ext{where} head_i = ext{Attention}(QW_i^Q, KW_i^K, VW_i^V) Args: embed_dim: total dimension of the model num_heads: parallel attention layers, or heads """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.out_proj = Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() self.conv1 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv2 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv3 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) def _reset_parameters(self): xavier_uniform_(self.out_proj.weight) def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, attn_mask=None): """ Inputs of forward function query: [target length, batch size, embed dim] key: [sequence length, batch size, embed dim] value: [sequence length, batch size, embed dim] key_padding_mask: if True, mask padding based on batch size incremental_state: if provided, previous time steps are cashed need_weights: output attn_output_weights static_kv: key and value are static Outputs of forward function attn_output: [target length, batch size, embed dim] attn_output_weights: [batch size, target length, sequence length] """ q_shape = query.shape src_shape = key.shape q = self._in_proj_q(query) k = self._in_proj_k(key) v = self._in_proj_v(value) q *= self.scaling q = torch.reshape(q, (q_shape[0], q_shape[1], self.num_heads, self. head_dim)) q = q.permute(1, 2, 0, 3) k = torch.reshape(k, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) k = k.permute(1, 2, 0, 3) v = torch.reshape(v, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) v = v.permute(1, 2, 0, 3) if key_padding_mask is not None: assert key_padding_mask.shape[0] == q_shape[1] assert key_padding_mask.shape[1] == src_shape[0] attn_output_weights = torch.matmul(q, k.permute(0, 1, 3, 2)) if attn_mask is not None: attn_mask = torch.unsqueeze(torch.unsqueeze(attn_mask, 0), 0) attn_output_weights += attn_mask if key_padding_mask is not None: attn_output_weights = torch.reshape(attn_output_weights, [ q_shape[1], self.num_heads, q_shape[0], src_shape[0]]) key = torch.unsqueeze(torch.unsqueeze(key_padding_mask, 1), 2) key = key.type(torch.float32) y = torch.full(size=key.shape, fill_value=float('-Inf'), dtype= torch.float32) y = torch.where(key == 0.0, key, y) attn_output_weights += y attn_output_weights = F.softmax(attn_output_weights.type(torch. float32), dim=-1, dtype=torch.float32 if attn_output_weights. 
dtype == torch.float16 else attn_output_weights.dtype) attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_output_weights, v) attn_output = torch.reshape(attn_output.permute(2, 0, 1, 3), [ q_shape[0], q_shape[1], self.embed_dim]) attn_output = self.out_proj(attn_output) return attn_output def _in_proj_q(self, query): query = query.permute(1, 2, 0) query = torch.unsqueeze(query, dim=2) res = self.conv1(query) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_k(self, key): key = key.permute(1, 2, 0) key = torch.unsqueeze(key, dim=2) res = self.conv2(key) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_v(self, value): value = value.permute(1, 2, 0) value = torch.unsqueeze(value, dim=2) res = self.conv3(value) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res class TransformerEncoderLayerNew(nn.Module): """TransformerEncoderLayer is made up of self-attn and feedforward network. This standard encoder layer is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 6000-6010. Users may modify or implement in a different way during application. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). """ def __init__(self, d_model, nhead, dim_feedforward=2048, attention_dropout_rate=0.0, residual_dropout_rate=0.1): super(TransformerEncoderLayerNew, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout= attention_dropout_rate) self.conv1 = nn.Conv2d(in_channels=d_model, out_channels= dim_feedforward, kernel_size=(1, 1)) self.conv2 = nn.Conv2d(in_channels=dim_feedforward, out_channels= d_model, kernel_size=(1, 1)) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.dropout1 = Dropout(residual_dropout_rate) self.dropout2 = Dropout(residual_dropout_rate) def forward(self, input_0): primals_8 = self.self_attn.out_proj.weight primals_3 = self.self_attn.out_proj.bias primals_2 = self.self_attn.conv1.weight primals_5 = self.self_attn.conv1.bias primals_4 = self.self_attn.conv2.weight primals_7 = self.self_attn.conv2.bias primals_6 = self.self_attn.conv3.weight primals_9 = self.self_attn.conv3.bias primals_12 = self.conv1.weight primals_13 = self.conv1.bias primals_14 = self.conv2.weight primals_10 = self.conv2.bias primals_11 = self.norm1.weight primals_15 = self.norm1.bias primals_16 = self.norm2.weight primals_17 = self.norm2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
verages/PaddleOCR2Pytorch
TransformerEncoderLayer
false
4,690
[ "Apache-2.0" ]
0
201f0d5d6007f49620c49af7d222c3b220eb3e70
https://github.com/verages/PaddleOCR2Pytorch/tree/201f0d5d6007f49620c49af7d222c3b220eb3e70
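A quick, self-contained sketch (not part of the dataset record above) of why the attention module replaces its Q/K/V Linear projections with 1x1 Conv2d layers: over a [B, C, 1, T] view of the sequence, a 1x1 convolution is exactly a per-position linear map over channels. Only standard torch.nn is assumed; the sizes are illustrative.

import torch
import torch.nn as nn

torch.manual_seed(0)
embed_dim, seq_len, batch = 8, 5, 3

linear = nn.Linear(embed_dim, embed_dim)
conv = nn.Conv2d(embed_dim, embed_dim, kernel_size=(1, 1))
with torch.no_grad():
    # share parameters: the 1x1 conv weight is the linear weight with two trailing singleton dims
    conv.weight.copy_(linear.weight.view(embed_dim, embed_dim, 1, 1))
    conv.bias.copy_(linear.bias)

x = torch.randn(seq_len, batch, embed_dim)            # [T, B, C] layout used by the module
y_linear = linear(x)                                  # plain projection over the channel dim
x_conv = x.permute(1, 2, 0).unsqueeze(2)              # [B, C, 1, T], as in _in_proj_q
y_conv = conv(x_conv).squeeze(2).permute(2, 0, 1)     # back to [T, B, C]
print(torch.allclose(y_linear, y_conv, atol=1e-5))    # True up to float tolerance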
GaussianKernel
import torch import torch.nn as nn class GaussianKernel(nn.Module): """ Gaussian kernel module. :param mu: Float, mean of the kernel. :param sigma: Float, sigma of the kernel. Examples: >>> import torch >>> kernel = GaussianKernel() >>> x = torch.randn(4, 5, 10) >>> x.shape torch.Size([4, 5, 10]) >>> kernel(x).shape torch.Size([4, 5, 10]) """ def __init__(self, mu: 'float'=1.0, sigma: 'float'=1.0): """Gaussian kernel constructor.""" super().__init__() self.mu = mu self.sigma = sigma def forward(self, x): """Forward.""" return torch.exp(-0.5 * (x - self.mu) ** 2 / self.sigma ** 2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = -0.5 tmp5 = tmp3 * tmp4 tmp6 = tmp5 * tmp1 tmp7 = tl_math.exp(tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_mul_pow_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GaussianKernelNew(nn.Module): """ Gaussian kernel module. :param mu: Float, mean of the kernel. :param sigma: Float, sigma of the kernel. Examples: >>> import torch >>> kernel = GaussianKernel() >>> x = torch.randn(4, 5, 10) >>> x.shape torch.Size([4, 5, 10]) >>> kernel(x).shape torch.Size([4, 5, 10]) """ def __init__(self, mu: 'float'=1.0, sigma: 'float'=1.0): """Gaussian kernel constructor.""" super().__init__() self.mu = mu self.sigma = sigma def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zfjsail/MatchZoo-py
GaussianKernel
false
4,691
[ "Apache-2.0" ]
0
c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
https://github.com/zfjsail/MatchZoo-py/tree/c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
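For the record above, the fused kernel hard-codes the constructor defaults (mu=1.0, sigma=1.0), which is why only the literals 1.0 and -0.5 appear in it. A small sanity check of that constant folding, assuming nothing beyond stock torch:

import torch

x = torch.rand(4, 4, 4, 4)
reference = torch.exp(-0.5 * (x - 1.0) ** 2 / 1.0 ** 2)        # eager GaussianKernel() with defaults
fused_style = torch.exp((x - 1.0) * (x - 1.0) * -0.5 * 1.0)    # mirrors tmp2..tmp7 in the kernel
print(torch.allclose(reference, fused_style))                   # True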
Pointwise
import torch import torch.nn as nn import torch.nn.functional as F class Pointwise(nn.Module): def __init__(self, Cin=4, K=1, Cout=10): super(Pointwise, self).__init__() self.conv1 = nn.Conv2d(Cin, Cout, kernel_size=K, bias=False, padding=0, stride=1) def forward(self, x): return F.relu(self.conv1(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (10, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 4, 4), (160, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 10, 4, 4), (160, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1, buf2, 640, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf2 class PointwiseNew(nn.Module): def __init__(self, Cin=4, K=1, Cout=10): super(PointwiseNew, self).__init__() self.conv1 = nn.Conv2d(Cin, Cout, kernel_size=K, bias=False, padding=0, stride=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
sfu-arch/TensorBricks
Pointwise
false
4,692
[ "MIT" ]
0
c46c60d0939b7deb65f103bf34961d47419ce571
https://github.com/sfu-arch/TensorBricks/tree/c46c60d0939b7deb65f103bf34961d47419ce571
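Illustrative note for the record above: a pointwise (1x1) convolution only mixes channels, so the 4x4x4x4 input becomes 4x10x4x4, i.e. 640 elements, which is the xnumel the fused ReLU kernel iterates over. A minimal shape check using only stock torch:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(4, 10, kernel_size=1, bias=False)   # same shape as Pointwise(Cin=4, K=1, Cout=10)
x = torch.rand(4, 4, 4, 4)
y = F.relu(conv(x))
print(y.shape, y.numel())                             # torch.Size([4, 10, 4, 4]) 640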
GCN
from torch.nn import Module import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.nn.modules.module import Module class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.randn(in_features, out_features)) if bias: self.bias = Parameter(torch.zeros(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input.float(), self.weight.float()) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output class GCN(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCN, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, x, adj): x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc2(x, adj) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import math import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_3, buf3, alpha=1, beta=1, out=buf4) del primals_6 buf5 = buf3 del buf3 triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return buf6, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.randn(in_features, out_features)) if bias: self.bias = Parameter(torch.zeros(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input.float(), self.weight.float()) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output class GCNNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_2 = self.gc2.weight primals_6 = self.gc2.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
yutaoming/Rare-Category-Detection
GCN
false
4,693
[ "MIT" ]
0
76cf023dff44eef3ecc17f0ebf2b11a08cd63a73
https://github.com/yutaoming/Rare-Category-Detection/tree/76cf023dff44eef3ecc17f0ebf2b11a08cd63a73
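The two _log_softmax kernels in the record above implement the standard numerically stable split of log-softmax: one pass subtracts the per-row maximum, the next subtracts log(sum(exp(.))) of the shifted values. A short eager-mode check of that identity (illustrative only):

import torch

x = torch.randn(4, 4)
shifted = x - x.max(dim=1, keepdim=True).values                    # what _log_softmax_1 computes
manual = shifted - shifted.exp().sum(dim=1, keepdim=True).log()    # what _log_softmax_2 computes
print(torch.allclose(manual, torch.log_softmax(x, dim=1), atol=1e-6))   # True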
Sage
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.module import Module class SageConv(Module): """ Simple Graphsage layer """ def __init__(self, in_features, out_features, bias=False): super(SageConv, self).__init__() self.proj = nn.Linear(in_features * 2, out_features, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.proj.weight) if self.proj.bias is not None: nn.init.constant_(self.proj.bias, 0.0) def forward(self, features, adj): """ Args: adj: can be sparse or dense matrix. """ if not isinstance(adj, torch.sparse.FloatTensor): if len(adj.shape) == 3: neigh_feature = torch.bmm(adj, features) / (adj.sum(dim=1). reshape((adj.shape[0], adj.shape[1], -1)) + 1) else: neigh_feature = torch.mm(adj, features) / (adj.sum(dim=1). reshape(adj.shape[0], -1) + 1) else: neigh_feature = torch.spmm(adj, features) / (adj.to_dense().sum (dim=1).reshape(adj.shape[0], -1) + 1) data = torch.cat([features, neigh_feature], dim=-1) combined = self.proj(data) return combined class Sage(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(Sage, self).__init__() self.sage1 = SageConv(nfeat, nhid) self.mlp = nn.Linear(nhid, nclass) self.dropout = dropout self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.mlp.weight, std=0.05) def forward(self, x, adj): x = F.relu(self.sage1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.mlp(x) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import torch.nn as nn from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr2 + 4 * x1, tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tl.load(in_ptr2 + (1 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = tl.load(in_ptr2 + (2 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp12 + tmp13 tmp15 = tl.load(in_ptr2 + (3 + 4 * x1), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = 1.0 tmp18 = tmp16 + tmp17 tmp19 = tmp9 / tmp18 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp6, tmp19, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_2, buf0, primals_1, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf2 = buf0 del buf0 extern_kernels.mm(buf1, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf2) del primals_3 buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(16)](buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_5 return buf4, buf1, buf3, primals_4 class SageConv(Module): """ Simple Graphsage layer """ def __init__(self, in_features, out_features, bias=False): super(SageConv, self).__init__() self.proj = nn.Linear(in_features * 2, out_features, bias=bias) 
self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.proj.weight) if self.proj.bias is not None: nn.init.constant_(self.proj.bias, 0.0) def forward(self, features, adj): """ Args: adj: can be sparse or dense matrix. """ if not isinstance(adj, torch.sparse.FloatTensor): if len(adj.shape) == 3: neigh_feature = torch.bmm(adj, features) / (adj.sum(dim=1). reshape((adj.shape[0], adj.shape[1], -1)) + 1) else: neigh_feature = torch.mm(adj, features) / (adj.sum(dim=1). reshape(adj.shape[0], -1) + 1) else: neigh_feature = torch.spmm(adj, features) / (adj.to_dense().sum (dim=1).reshape(adj.shape[0], -1) + 1) data = torch.cat([features, neigh_feature], dim=-1) combined = self.proj(data) return combined class SageNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(SageNew, self).__init__() self.sage1 = SageConv(nfeat, nhid) self.mlp = nn.Linear(nhid, nclass) self.dropout = dropout self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.mlp.weight, std=0.05) def forward(self, input_0, input_1): primals_3 = self.sage1.proj.weight primals_1 = self.mlp.weight primals_5 = self.mlp.bias primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
yutaoming/Rare-Category-Detection
Sage
false
4,694
[ "MIT" ]
0
76cf023dff44eef3ecc17f0ebf2b11a08cd63a73
https://github.com/yutaoming/Rare-Category-Detection/tree/76cf023dff44eef3ecc17f0ebf2b11a08cd63a73
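For the record above, the fused cat kernel folds SageConv's neighbour aggregation into the concatenation: in the dense-adjacency case the aggregated features are divided by (row sum + 1) and then concatenated with the node's own features. A minimal eager sketch of that data path, assuming only stock torch:

import torch

features = torch.rand(4, 4)
adj = torch.rand(4, 4)
neigh = torch.mm(adj, features) / (adj.sum(dim=1, keepdim=True) + 1)   # neighbour aggregation with +1 smoothing
data = torch.cat([features, neigh], dim=-1)                            # what the fused cat kernel builds
print(data.shape)   # torch.Size([4, 8]), fed into the Linear(8, 4) projection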
RankCrossEntropyLoss
import torch import torch.nn as nn import torch.nn.functional as F class RankCrossEntropyLoss(nn.Module): """Creates a criterion that measures rank cross entropy loss.""" __constants__ = ['num_neg'] def __init__(self, num_neg: 'int'=1): """ :class:`RankCrossEntropyLoss` constructor. :param num_neg: Number of negative instances in hinge loss. """ super().__init__() self.num_neg = num_neg def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'): """ Calculate rank cross entropy loss. :param y_pred: Predicted result. :param y_true: Label. :return: Rank cross loss. """ logits = y_pred[::self.num_neg + 1, :] labels = y_true[::self.num_neg + 1, :] for neg_idx in range(self.num_neg): neg_logits = y_pred[neg_idx + 1::self.num_neg + 1, :] neg_labels = y_true[neg_idx + 1::self.num_neg + 1, :] logits = torch.cat((logits, neg_logits), dim=-1) labels = torch.cat((labels, neg_labels), dim=-1) return -torch.mean(torch.sum(labels * torch.log(F.softmax(logits, dim=-1) + torch.finfo(float).eps), dim=-1)) @property def num_neg(self): """`num_neg` getter.""" return self._num_neg @num_neg.setter def num_neg(self, value): """`num_neg` setter.""" self._num_neg = value def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_add_cat_log_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp0 = r2 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + 128 * x1 + r2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (64 + 4 * x0 + 128 * x1 + (-4 + r2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, float('-inf')) tmp14 = triton_helpers.max2(tmp13, 1)[:, None] tmp15 = tmp10 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tl.load(in_ptr1 + (4 * x0 + 128 * x1 + r2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr1 + (64 + 4 * x0 + 128 * x1 + (-4 + r2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tmp16 / tmp20 tmp25 = 2.220446049250313e-16 tmp26 = tmp24 + tmp25 tmp27 = tl_math.log(tmp26) tmp28 = tmp23 * tmp27 tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = tl.sum(tmp31, 1)[:, None] tl.store(in_out_ptr0 + x3, tmp32, xmask) @triton.jit def triton_per_fused_mean_neg_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 32.0 tmp5 = tmp3 / tmp4 tmp6 = -tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((2, 4, 4, 1), (16, 4, 1, 32), torch.float32) buf2 = reinterpret_tensor(buf1, (2, 4, 4), (16, 4, 1), 0) del buf1 get_raw_stream(0) triton_per_fused__softmax_add_cat_log_mul_sum_0[grid(32)](buf2, arg0_1, arg1_1, 32, 8, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_mean_neg_1[grid(1)](buf4, buf2, 1, 32, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf4, class RankCrossEntropyLossNew(nn.Module): """Creates a criterion that 
measures rank cross entropy loss.""" __constants__ = ['num_neg'] def __init__(self, num_neg: 'int'=1): """ :class:`RankCrossEntropyLoss` constructor. :param num_neg: Number of negative instances in hinge loss. """ super().__init__() self.num_neg = num_neg @property def num_neg(self): """`num_neg` getter.""" return self._num_neg @num_neg.setter def num_neg(self, value): """`num_neg` setter.""" self._num_neg = value def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
zfjsail/MatchZoo-py
RankCrossEntropyLoss
false
4,695
[ "Apache-2.0" ]
0
c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
https://github.com/zfjsail/MatchZoo-py/tree/c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
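In the record above, rows of y_pred alternate positive/negative with stride num_neg + 1 (here 2); the loss regroups them side by side before the softmax cross entropy, and the literal 2.220446049250313e-16 in the fused kernel is float64 machine epsilon. A small eager sketch with made-up scores, assuming only stock torch:

import torch
import torch.nn.functional as F

y_pred = torch.rand(4, 1)                                   # [pos, neg, pos, neg] scores (illustrative)
y_true = torch.tensor([[1.0], [0.0], [1.0], [0.0]])
logits = torch.cat((y_pred[0::2], y_pred[1::2]), dim=-1)    # shape (2, 2): positive vs. negative
labels = torch.cat((y_true[0::2], y_true[1::2]), dim=-1)
eps = torch.finfo(torch.float64).eps                         # 2.220446049250313e-16
loss = -torch.mean(torch.sum(labels * torch.log(F.softmax(logits, dim=-1) + eps), dim=-1))
print(loss)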
TransformerDecoderLayer
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn.init import xavier_uniform_ from torch.nn import Dropout from torch.nn import LayerNorm class MultiheadAttention(nn.Module): """Allows the model to jointly attend to information from different representation subspaces. See reference: Attention Is All You Need .. math:: ext{MultiHead}(Q, K, V) = ext{Concat}(head_1,\\dots,head_h)W^O ext{where} head_i = ext{Attention}(QW_i^Q, KW_i^K, VW_i^V) Args: embed_dim: total dimension of the model num_heads: parallel attention layers, or heads """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.out_proj = Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() self.conv1 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv2 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv3 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) def _reset_parameters(self): xavier_uniform_(self.out_proj.weight) def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, attn_mask=None): """ Inputs of forward function query: [target length, batch size, embed dim] key: [sequence length, batch size, embed dim] value: [sequence length, batch size, embed dim] key_padding_mask: if True, mask padding based on batch size incremental_state: if provided, previous time steps are cashed need_weights: output attn_output_weights static_kv: key and value are static Outputs of forward function attn_output: [target length, batch size, embed dim] attn_output_weights: [batch size, target length, sequence length] """ q_shape = query.shape src_shape = key.shape q = self._in_proj_q(query) k = self._in_proj_k(key) v = self._in_proj_v(value) q *= self.scaling q = torch.reshape(q, (q_shape[0], q_shape[1], self.num_heads, self. head_dim)) q = q.permute(1, 2, 0, 3) k = torch.reshape(k, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) k = k.permute(1, 2, 0, 3) v = torch.reshape(v, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) v = v.permute(1, 2, 0, 3) if key_padding_mask is not None: assert key_padding_mask.shape[0] == q_shape[1] assert key_padding_mask.shape[1] == src_shape[0] attn_output_weights = torch.matmul(q, k.permute(0, 1, 3, 2)) if attn_mask is not None: attn_mask = torch.unsqueeze(torch.unsqueeze(attn_mask, 0), 0) attn_output_weights += attn_mask if key_padding_mask is not None: attn_output_weights = torch.reshape(attn_output_weights, [ q_shape[1], self.num_heads, q_shape[0], src_shape[0]]) key = torch.unsqueeze(torch.unsqueeze(key_padding_mask, 1), 2) key = key.type(torch.float32) y = torch.full(size=key.shape, fill_value=float('-Inf'), dtype= torch.float32) y = torch.where(key == 0.0, key, y) attn_output_weights += y attn_output_weights = F.softmax(attn_output_weights.type(torch. float32), dim=-1, dtype=torch.float32 if attn_output_weights. 
dtype == torch.float16 else attn_output_weights.dtype) attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_output_weights, v) attn_output = torch.reshape(attn_output.permute(2, 0, 1, 3), [ q_shape[0], q_shape[1], self.embed_dim]) attn_output = self.out_proj(attn_output) return attn_output def _in_proj_q(self, query): query = query.permute(1, 2, 0) query = torch.unsqueeze(query, dim=2) res = self.conv1(query) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_k(self, key): key = key.permute(1, 2, 0) key = torch.unsqueeze(key, dim=2) res = self.conv2(key) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_v(self, value): value = value.permute(1, 2, 0) value = torch.unsqueeze(value, dim=2) res = self.conv3(value) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res class TransformerDecoderLayer(nn.Module): """TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network. This standard decoder layer is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 6000-6010. Users may modify or implement in a different way during application. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). """ def __init__(self, d_model, nhead, dim_feedforward=2048, attention_dropout_rate=0.0, residual_dropout_rate=0.1): super(TransformerDecoderLayer, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout= attention_dropout_rate) self.multihead_attn = MultiheadAttention(d_model, nhead, dropout= attention_dropout_rate) self.conv1 = nn.Conv2d(in_channels=d_model, out_channels= dim_feedforward, kernel_size=(1, 1)) self.conv2 = nn.Conv2d(in_channels=dim_feedforward, out_channels= d_model, kernel_size=(1, 1)) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.norm3 = LayerNorm(d_model) self.dropout1 = Dropout(residual_dropout_rate) self.dropout2 = Dropout(residual_dropout_rate) self.dropout3 = Dropout(residual_dropout_rate) def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None): """Pass the inputs (and mask) through the decoder layer. Args: tgt: the sequence to the decoder layer (required). memory: the sequnce from the last layer of the encoder (required). tgt_mask: the mask for the tgt sequence (optional). memory_mask: the mask for the memory sequence (optional). tgt_key_padding_mask: the mask for the tgt keys per batch (optional). memory_key_padding_mask: the mask for the memory keys per batch (optional). 
""" tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask) tgt = tgt + self.dropout1(tgt2) tgt = self.norm1(tgt) tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask= memory_mask, key_padding_mask=memory_key_padding_mask) tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) tgt = tgt.permute(1, 2, 0) tgt = torch.unsqueeze(tgt, 2) tgt2 = self.conv2(F.relu(self.conv1(tgt))) tgt2 = torch.squeeze(tgt2, 2) tgt2 = tgt2.permute(2, 0, 1) tgt = torch.squeeze(tgt, 2) tgt = tgt.permute(2, 0, 1) tgt = tgt + self.dropout3(tgt2) tgt = self.norm3(tgt) return tgt def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn.init import xavier_uniform_ from torch.nn import Dropout from torch.nn import LayerNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) tl.store(out_ptr1 + x3, tmp0, xmask) tl.store(out_ptr2 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x5 = xindex y4 = yindex x3 = xindex // 4 y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x5 + 16 * y4), xmask & ymask) tmp1 = tl.load(in_ptr0 + (4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (y0 + 4 * x5 + 64 * y1), tmp8, xmask & ymask) @triton.jit def triton_poi_fused_bmm_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 64 * (x0 // 4) + x0 % 4), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_unsqueeze_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_15(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1), tmp13, xmask) @triton.jit def triton_poi_fused_transpose_17(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) 
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_13, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4,), (1,)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (2048, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_24, (2048,), (1,)) assert_size_stride(primals_25, (4, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_26, (4,), (1,)) assert_size_stride(primals_27, (4,), (1,)) assert_size_stride(primals_28, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) buf4 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](primals_1, buf0, buf2, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 4), (16, 1, 16, 4)) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 4), (16, 1, 16, 4)) buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 4), (16, 1, 16, 4)) buf6 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf4 triton_poi_fused_convolution_1[grid(16, 4)](buf3, primals_5, buf6, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf7 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 triton_poi_fused_mul_2[grid(64)](buf1, primals_3, buf7, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 1, 16, 4), 0) del buf8 triton_poi_fused__softmax_4[grid(16, 16)](buf9, buf10, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf1 triton_poi_fused_convolution_1[grid(16, 4)](buf5, primals_7, buf11, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf12 = reinterpret_tensor(buf9, (16, 4, 4), (1, 64, 16), 0) del buf9 triton_poi_fused_bmm_5[grid(256)](buf10, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0) del buf5 extern_kernels.bmm(buf12, reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf13) buf14 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0) del buf2 triton_poi_fused_clone_6[grid(4, 16)](buf13, buf14, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) buf15 = 
reinterpret_tensor(buf13, (16, 4), (4, 1), 0) del buf13 extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15) buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf15, primals_9, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_1, buf15, primals_9, buf16, buf17, primals_10, primals_11, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf19 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) triton_poi_fused_unsqueeze_9[grid(64)](buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_13, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 4, 1, 4), (16, 1, 16, 4)) buf21 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) triton_poi_fused_unsqueeze_9[grid(64)](primals_12, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf22 = extern_kernels.convolution(buf21, primals_15, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 4, 1, 4), (16, 1, 16, 4)) buf23 = extern_kernels.convolution(buf21, primals_17, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 4, 1, 4), (16, 1, 16, 4)) buf24 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_convolution_1[grid(16, 4)](buf22, primals_16, buf24, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_16 buf25 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0) del buf22 triton_poi_fused_mul_2[grid(64)](buf20, primals_14, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf26 = reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0) del buf12 extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf24, (16, 1, 4), (4, 0, 1), 0), out=buf26) buf27 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf26, buf27, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf28 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 1, 16, 4), 0) del buf26 triton_poi_fused__softmax_4[grid(16, 16)](buf27, buf28, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf29 = reinterpret_tensor(buf20, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf20 triton_poi_fused_convolution_1[grid(16, 4)](buf23, primals_18, buf29, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_18 buf30 = reinterpret_tensor(buf27, (16, 4, 4), (1, 64, 16), 0) del buf27 triton_poi_fused_bmm_5[grid(256)](buf28, buf30, 256, XBLOCK=256, num_warps=4, num_stages=1) buf31 = reinterpret_tensor(buf23, (16, 4, 1), (4, 1, 1), 0) del buf23 extern_kernels.bmm(buf30, reinterpret_tensor(buf29, (16, 4, 1), (4, 1, 0), 0), out=buf31) del buf30 buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(4, 16)](buf31, buf32, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) buf33 = reinterpret_tensor(buf31, (16, 4), (4, 1), 0) del buf31 extern_kernels.mm(reinterpret_tensor(buf32, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf33) buf34 = buf18 del buf18 triton_poi_fused_add_10[grid(64)](buf34, buf33, primals_20, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_20 buf35 = buf17 del buf17 buf36 = buf16 del buf16 triton_poi_fused_native_layer_norm_11[grid(16)](buf34, buf35, buf36, 16, XBLOCK=16, num_warps=1, num_stages=1) buf37 = reinterpret_tensor(buf33, (4, 4, 4), (16, 4, 1), 0) del buf33 triton_poi_fused_native_layer_norm_12[grid(64)](buf34, buf35, buf36, primals_21, primals_22, buf37, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_22 buf38 = empty_strided_cuda((4, 4, 1, 4), (16, 1, 16, 4), torch.float32) triton_poi_fused_unsqueeze_9[grid(64)](buf37, buf38, 64, XBLOCK=64, num_warps=1, num_stages=1) buf39 = extern_kernels.convolution(buf38, primals_23, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 2048, 1, 4), (8192, 1, 8192, 2048)) buf40 = buf39 del buf39 triton_poi_fused_convolution_relu_13[grid(32768)](buf40, primals_24, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_24 buf41 = extern_kernels.convolution(buf40, primals_25, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf41, (4, 4, 1, 4), (16, 1, 16, 4)) buf42 = buf41 del buf41 triton_poi_fused_convolution_14[grid(64)](buf42, primals_26, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_26 buf43 = reinterpret_tensor(buf36, (4, 4, 1), (1, 4, 16), 0) del buf36 buf44 = reinterpret_tensor(buf35, (4, 4, 1), (1, 4, 16), 0) del buf35 triton_poi_fused_add_native_layer_norm_15[grid(16)](buf38, buf42, buf43, buf44, 16, XBLOCK=16, num_warps=1, num_stages=1) buf45 = buf37 del buf37 triton_poi_fused_add_native_layer_norm_16[grid(64)](buf38, buf42, buf43, buf44, primals_27, primals_28, buf45, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf43 del buf44 del primals_28 buf46 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) triton_poi_fused_transpose_17[grid(16, 4)](buf25, buf46, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf47 = reinterpret_tensor(buf25, (16, 1, 4), (4, 4, 1), 0) del buf25 triton_poi_fused_transpose_17[grid(16, 4)](buf7, buf47, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf7 return (buf45, primals_1, primals_2, primals_4, primals_6, primals_9, primals_10, primals_13, primals_15, primals_17, primals_21, primals_23, primals_25, primals_27, buf10, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, buf19, buf21, buf28, reinterpret_tensor (buf32, (16, 4), (4, 1), 0), buf34, buf38, buf40, buf42, primals_19, reinterpret_tensor(buf29, (16, 1, 4), (4, 4, 1), 0), buf46, reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 4), 0), primals_8, reinterpret_tensor(buf11, (16, 1, 4), (4, 4, 1), 0), buf47, reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 4), 0)) class MultiheadAttention(nn.Module): """Allows the model to jointly attend to information from different representation subspaces. See reference: Attention Is All You Need .. 
math:: \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) Args: embed_dim: total dimension of the model num_heads: parallel attention layers, or heads """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.out_proj = Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() self.conv1 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv2 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) self.conv3 = torch.nn.Conv2d(in_channels=embed_dim, out_channels= embed_dim, kernel_size=(1, 1)) def _reset_parameters(self): xavier_uniform_(self.out_proj.weight) def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, attn_mask=None): """ Inputs of forward function query: [target length, batch size, embed dim] key: [sequence length, batch size, embed dim] value: [sequence length, batch size, embed dim] key_padding_mask: if True, mask padding based on batch size incremental_state: if provided, previous time steps are cached need_weights: output attn_output_weights static_kv: key and value are static Outputs of forward function attn_output: [target length, batch size, embed dim] attn_output_weights: [batch size, target length, sequence length] """ q_shape = query.shape src_shape = key.shape q = self._in_proj_q(query) k = self._in_proj_k(key) v = self._in_proj_v(value) q *= self.scaling q = torch.reshape(q, (q_shape[0], q_shape[1], self.num_heads, self. head_dim)) q = q.permute(1, 2, 0, 3) k = torch.reshape(k, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) k = k.permute(1, 2, 0, 3) v = torch.reshape(v, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) v = v.permute(1, 2, 0, 3) if key_padding_mask is not None: assert key_padding_mask.shape[0] == q_shape[1] assert key_padding_mask.shape[1] == src_shape[0] attn_output_weights = torch.matmul(q, k.permute(0, 1, 3, 2)) if attn_mask is not None: attn_mask = torch.unsqueeze(torch.unsqueeze(attn_mask, 0), 0) attn_output_weights += attn_mask if key_padding_mask is not None: attn_output_weights = torch.reshape(attn_output_weights, [ q_shape[1], self.num_heads, q_shape[0], src_shape[0]]) key = torch.unsqueeze(torch.unsqueeze(key_padding_mask, 1), 2) key = key.type(torch.float32) y = torch.full(size=key.shape, fill_value=float('-Inf'), dtype= torch.float32) y = torch.where(key == 0.0, key, y) attn_output_weights += y attn_output_weights = F.softmax(attn_output_weights.type(torch. float32), dim=-1, dtype=torch.float32 if attn_output_weights.
dtype == torch.float16 else attn_output_weights.dtype) attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_output_weights, v) attn_output = torch.reshape(attn_output.permute(2, 0, 1, 3), [ q_shape[0], q_shape[1], self.embed_dim]) attn_output = self.out_proj(attn_output) return attn_output def _in_proj_q(self, query): query = query.permute(1, 2, 0) query = torch.unsqueeze(query, dim=2) res = self.conv1(query) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_k(self, key): key = key.permute(1, 2, 0) key = torch.unsqueeze(key, dim=2) res = self.conv2(key) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res def _in_proj_v(self, value): value = value.permute(1, 2, 0) value = torch.unsqueeze(value, dim=2) res = self.conv3(value) res = torch.squeeze(res, dim=2) res = res.permute(2, 0, 1) return res class TransformerDecoderLayerNew(nn.Module): """TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network. This standard decoder layer is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 6000-6010. Users may modify or implement in a different way during application. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). """ def __init__(self, d_model, nhead, dim_feedforward=2048, attention_dropout_rate=0.0, residual_dropout_rate=0.1): super(TransformerDecoderLayerNew, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout= attention_dropout_rate) self.multihead_attn = MultiheadAttention(d_model, nhead, dropout= attention_dropout_rate) self.conv1 = nn.Conv2d(in_channels=d_model, out_channels= dim_feedforward, kernel_size=(1, 1)) self.conv2 = nn.Conv2d(in_channels=dim_feedforward, out_channels= d_model, kernel_size=(1, 1)) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.norm3 = LayerNorm(d_model) self.dropout1 = Dropout(residual_dropout_rate) self.dropout2 = Dropout(residual_dropout_rate) self.dropout3 = Dropout(residual_dropout_rate) def forward(self, input_0, input_1): primals_8 = self.self_attn.out_proj.weight primals_3 = self.self_attn.out_proj.bias primals_2 = self.self_attn.conv1.weight primals_5 = self.self_attn.conv1.bias primals_4 = self.self_attn.conv2.weight primals_7 = self.self_attn.conv2.bias primals_6 = self.self_attn.conv3.weight primals_9 = self.self_attn.conv3.bias primals_19 = self.multihead_attn.out_proj.weight primals_10 = self.multihead_attn.out_proj.bias primals_13 = self.multihead_attn.conv1.weight primals_11 = self.multihead_attn.conv1.bias primals_15 = self.multihead_attn.conv2.weight primals_14 = self.multihead_attn.conv2.bias primals_17 = self.multihead_attn.conv3.weight primals_16 = self.multihead_attn.conv3.bias primals_23 = self.conv1.weight primals_24 = self.conv1.bias primals_25 = self.conv2.weight primals_18 = self.conv2.bias primals_20 = self.norm1.weight primals_21 = self.norm1.bias primals_22 = self.norm2.weight primals_26 = self.norm2.bias primals_27 = self.norm3.weight primals_28 = self.norm3.bias primals_1 = input_0 primals_12 = input_1 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28]) return output[0]
verages/PaddleOCR2Pytorch
TransformerDecoderLayer
false
4,696
[ "Apache-2.0" ]
0
201f0d5d6007f49620c49af7d222c3b220eb3e70
https://github.com/verages/PaddleOCR2Pytorch/tree/201f0d5d6007f49620c49af7d222c3b220eb3e70
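Editor's side note on the record above, not part of the dataset: the MultiheadAttention variant in this record projects Q, K and V with 1x1 Conv2d layers (conv1/conv2/conv3) instead of nn.Linear. The hypothetical sketch below, built from freshly created layers rather than the record's classes, illustrates why the two formulations compute the same projection for a sequence laid out as (batch, embed_dim, 1, seq_len).

import torch
import torch.nn as nn

embed_dim, seq_len, batch = 4, 5, 2
conv = nn.Conv2d(embed_dim, embed_dim, kernel_size=(1, 1))
lin = nn.Linear(embed_dim, embed_dim)
# Copy the 1x1 conv weights into the linear layer so both compute the same map.
lin.weight.data.copy_(conv.weight.data.view(embed_dim, embed_dim))
lin.bias.data.copy_(conv.bias.data)

x = torch.randn(seq_len, batch, embed_dim)        # (L, B, E), as in forward()
via_conv = conv(x.permute(1, 2, 0).unsqueeze(2))  # (B, E, 1, L), same layout as _in_proj_q
via_conv = via_conv.squeeze(2).permute(2, 0, 1)   # back to (L, B, E)
torch.testing.assert_close(via_conv, lin(x), rtol=1e-4, atol=1e-5)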
ReLU
import torch import torch.nn as nn from abc import abstractmethod import torch.utils.data import torch.nn class EfficientBlockBase(nn.Module): """ PyTorchVideo/accelerator provides a set of efficient blocks that have optimal efficiency for each target hardware device. Each efficient block has two forms: - original form: this form is for training. When efficient block is instantiated, it is in this original form. - deployable form: this form is for deployment. Once the network is ready for deploy, it can be converted into deployable form for efficient execution on target hardware. One block is transformed into deployable form by calling convert() method. By conversion to deployable form, various optimization (operator fuse, kernel optimization, etc.) are applied. EfficientBlockBase is the base class for efficient blocks. All efficient blocks should inherit this base class and implement following methods: - forward(): same as required by nn.Module - convert(): called to convert block into deployable form """ @abstractmethod def convert(self): pass @abstractmethod def forward(self): pass class ReLU(EfficientBlockBase): """ ReLU activation function for EfficientBlockBase. """ def __init__(self): super().__init__() self.act = nn.ReLU(inplace=True) def forward(self, x): return self.act(x) def convert(self, *args, **kwarg): pass def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from abc import abstractmethod import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) return arg0_1, class EfficientBlockBase(nn.Module): """ PyTorchVideo/accelerator provides a set of efficient blocks that have optimal efficiency for each target hardware device. Each efficient block has two forms: - original form: this form is for training. When efficient block is instantiated, it is in this original form. - deployable form: this form is for deployment. Once the network is ready for deploy, it can be converted into deployable form for efficient execution on target hardware. One block is transformed into deployable form by calling convert() method. By conversion to deployable form, various optimization (operator fuse, kernel optimization, etc.) are applied. EfficientBlockBase is the base class for efficient blocks. All efficient blocks should inherit this base class and implement following methods: - forward(): same as required by nn.Module - convert(): called to convert block into deployable form """ @abstractmethod def convert(self): pass @abstractmethod def forward(self): pass class ReLUNew(EfficientBlockBase): """ ReLU activation function for EfficientBlockBase. """ def __init__(self): super().__init__() self.act = nn.ReLU(inplace=True) def convert(self, *args, **kwarg): pass def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zijian-hu/pytorchvideo
ReLU
false
4,697
[ "Apache-2.0" ]
0
51589b100437af2285c56ce2ccc7ccecb7f9b18b
https://github.com/zijian-hu/pytorchvideo/tree/51589b100437af2285c56ce2ccc7ccecb7f9b18b
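Illustrative note, not part of the dataset record: in the generated call() the fused kernel writes its result back into arg0_1, mirroring the inplace=True flag of the original module. A hypothetical check, assuming the ReLU/ReLUNew definitions above are in scope and a CUDA device is available (the Triton kernel is CUDA-only):

import torch

if torch.cuda.is_available():
    x = torch.randn(4, 4, 4, 4, device="cuda")
    y = x.clone()
    out = ReLUNew()(y)                      # runs the Triton kernel
    assert out.data_ptr() == y.data_ptr()   # result was written into the input buffer
    torch.testing.assert_close(out, ReLU()(x.clone()))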
Depthwise
import torch import torch.nn as nn import torch.nn.functional as F class Depthwise(nn.Module): def __init__(self, Cin=10, K=3, depth_multiplier=1): super(Depthwise, self).__init__() self.conv1 = nn.Conv2d(Cin, depth_multiplier * Cin, kernel_size=K, groups=Cin, bias=False, padding=0, stride=1) def forward(self, x): return F.relu(self.conv1(x)) def get_inputs(): return [torch.rand([4, 10, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 153760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x1 = xindex % 3844 x2 = xindex // 3844 tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + (x1 + 3968 * x2), tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (10, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 10, 64, 64), (40960, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=10, bias=None) assert_size_stride(buf0, (4, 10, 62, 62), (38440, 3844, 62, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 10, 62, 62), (39680, 3968, 62, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(153760)](buf1, buf2, 153760, XBLOCK=512, num_warps=8, num_stages=1) return buf1, primals_1, primals_2, buf2 class DepthwiseNew(nn.Module): def __init__(self, Cin=10, K=3, depth_multiplier=1): super(DepthwiseNew, self).__init__() self.conv1 = nn.Conv2d(Cin, depth_multiplier * Cin, kernel_size=K, groups=Cin, bias=False, padding=0, stride=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
sfu-arch/TensorBricks
Depthwise
false
4,698
[ "MIT" ]
0
c46c60d0939b7deb65f103bf34961d47419ce571
https://github.com/sfu-arch/TensorBricks/tree/c46c60d0939b7deb65f103bf34961d47419ce571
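Illustrative sketch, not part of the dataset record: with groups=Cin and depth_multiplier=1, the convolution above filters every input channel independently, and only the ReLU (plus its backward mask) is fused into the Triton kernel while the convolution itself goes through extern_kernels.convolution. The per-channel equivalence can be checked in eager mode with fresh layers (nothing from the record is required):

import torch
import torch.nn as nn
import torch.nn.functional as F

Cin, K = 10, 3
conv = nn.Conv2d(Cin, Cin, kernel_size=K, groups=Cin, bias=False)
x = torch.randn(1, Cin, 8, 8)
full = F.relu(conv(x))
# Each output channel depends only on the matching input channel.
per_channel = torch.cat(
    [F.relu(F.conv2d(x[:, c:c + 1], conv.weight[c:c + 1])) for c in range(Cin)],
    dim=1,
)
torch.testing.assert_close(full, per_channel)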
LearnMaskedDefault
import torch import torch.nn as nn import torch.utils.data import torch.nn class LearnMaskedDefault(nn.Module): """ Learns default values to fill invalid entries within input tensors. The invalid entries are represented by a mask which is passed into forward alongside the input tensor. Note the default value is only used if all entries in the batch row are invalid rather than just a portion of invalid entries within each batch row. """ def __init__(self, feature_dim: 'int', init_method: 'str'='gaussian', freeze: 'bool'=False): """ Args: feature_dim (int): the size of the default value parameter, this must match the input tensor size. init_method (str): the initial default value parameter. Options: 'guassian' 'zeros' freeze (bool): If True, the learned default parameter weights are frozen. """ super().__init__() if init_method == 'zeros': self._learned_defaults = nn.Parameter(torch.zeros(feature_dim), requires_grad=not freeze) elif init_method == 'gaussian': self._learned_defaults = nn.Parameter(torch.Tensor(feature_dim), requires_grad=not freeze) nn.init.normal_(self._learned_defaults) else: raise NotImplementedError( f"{init_method} not available. Options are: 'zeros' or 'gaussian'" ) def forward(self, x: 'torch.Tensor', mask: 'torch.Tensor') ->torch.Tensor: """ Args: x (torch.Tensor): tensor of shape (batch_size, feature_dim). mask (torch.Tensor): bool tensor of shape (batch_size, seq_len) If all elements in the batch dimension are False the learned default parameter is used for that batch element. Returns: Tensor with shape (batch_size, feature_dim) """ mask = mask.view(mask.shape[0], -1).any(dim=-1) for i in range(1, x.dim()): mask = mask.unsqueeze(i) x = x * mask.float() + self._learned_defaults * (1 - mask.float()) return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'feature_dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__to_copy_add_any_mul_rsub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r2 = rindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp9 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp1 = tmp0 != 0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = triton_helpers.any(tmp4, 1)[:, None] tmp7 = tmp5.to(tl.float32) tmp8 = tmp6 * tmp7 tmp10 = 1.0 tmp11 = tmp10 - tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tl.store(out_ptr1 + (r1 + 64 * x0), tmp13, xmask) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.bool) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_add_any_mul_rsub_0[grid(4)](primals_1, primals_2, primals_3, buf0, buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 del primals_3 return buf1, reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0) class LearnMaskedDefaultNew(nn.Module): """ Learns default values to fill invalid entries within input tensors. The invalid entries are represented by a mask which is passed into forward alongside the input tensor. Note the default value is only used if all entries in the batch row are invalid rather than just a portion of invalid entries within each batch row. """ def __init__(self, feature_dim: 'int', init_method: 'str'='gaussian', freeze: 'bool'=False): """ Args: feature_dim (int): the size of the default value parameter, this must match the input tensor size. init_method (str): the initial default value parameter. Options: 'guassian' 'zeros' freeze (bool): If True, the learned default parameter weights are frozen. """ super().__init__() if init_method == 'zeros': self._learned_defaults = nn.Parameter(torch.zeros(feature_dim), requires_grad=not freeze) elif init_method == 'gaussian': self._learned_defaults = nn.Parameter(torch.Tensor(feature_dim), requires_grad=not freeze) nn.init.normal_(self._learned_defaults) else: raise NotImplementedError( f"{init_method} not available. Options are: 'zeros' or 'gaussian'" ) def forward(self, input_0, input_1): primals_3 = self._learned_defaults primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
zijian-hu/pytorchvideo
LearnMaskedDefault
false
4,699
[ "Apache-2.0" ]
0
51589b100437af2285c56ce2ccc7ccecb7f9b18b
https://github.com/zijian-hu/pytorchvideo/tree/51589b100437af2285c56ce2ccc7ccecb7f9b18b
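Illustrative usage sketch, not part of the dataset record, assuming the original LearnMaskedDefault class above is in scope and run in eager mode on CPU: a batch row whose mask is entirely False is replaced by the learned default vector, while rows with at least one valid entry pass through unchanged.

import torch

module = LearnMaskedDefault(feature_dim=4, init_method="zeros")
x = torch.randn(2, 4)
mask = torch.tensor([[True, False, False, False],
                     [False, False, False, False]])
out = module(x, mask)
torch.testing.assert_close(out[0], x[0])                                # row 0 kept
torch.testing.assert_close(out[1], module._learned_defaults.detach())   # row 1 replaced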
MatchingTensor
import torch import torch.nn as nn import torch.nn.functional as F class MatchingTensor(nn.Module): """ Module that captures the basic interactions between two tensors. :param matching_dims: Word dimension of two interaction texts. :param channels: Number of word interaction tensor channels. :param normalize: Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to True, then the output of the dot product is the cosine proximity between the two samples. :param init_diag: Whether to initialize the diagonal elements of the matrix. Examples: >>> import matchzoo as mz >>> matching_dim = 5 >>> matching_tensor = mz.modules.MatchingTensor( ... matching_dim, ... channels=4, ... normalize=True, ... init_diag=True ... ) """ def __init__(self, matching_dim: 'int', channels: 'int'=4, normalize: 'bool'=True, init_diag: 'bool'=True): """:class:`MatchingTensor` constructor.""" super().__init__() self._matching_dim = matching_dim self._channels = channels self._normalize = normalize self._init_diag = init_diag self.interaction_matrix = torch.empty(self._channels, self. _matching_dim, self._matching_dim) if self._init_diag: self.interaction_matrix = self.interaction_matrix.uniform_(- 0.05, 0.05) for channel_index in range(self._channels): self.interaction_matrix[channel_index].fill_diagonal_(0.1) self.interaction_matrix = nn.Parameter(self.interaction_matrix) else: self.interaction_matrix = nn.Parameter(self.interaction_matrix. uniform_()) def forward(self, x, y): """ The computation logic of MatchingTensor. :param inputs: two input tensors. """ if self._normalize: x = F.normalize(x, p=2, dim=-1) y = F.normalize(y, p=2, dim=-1) output = torch.einsum('bld,cde,bre->bclr', x, self. interaction_matrix, y) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'matching_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 4 x3 = xindex // 4 y0 = yindex % 4 y1 = yindex // 4 x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask) tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 1, 4, 1, 1), (16, 4, 4, 1, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](primals_3, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 4), (0, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 16), (0, 16, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_div_0[grid(64)](primals_2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf4 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 16)](buf2, buf4, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0) del buf2 extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), out=buf5) del buf4 return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 4, 1, 16), 0 ), reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (1, 4, 16), (64, 1, 4), 0) class MatchingTensorNew(nn.Module): """ Module that captures the basic interactions between two tensors. :param matching_dims: Word dimension of two interaction texts. :param channels: Number of word interaction tensor channels. :param normalize: Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to True, then the output of the dot product is the cosine proximity between the two samples. :param init_diag: Whether to initialize the diagonal elements of the matrix. Examples: >>> import matchzoo as mz >>> matching_dim = 5 >>> matching_tensor = mz.modules.MatchingTensor( ... matching_dim, ... channels=4, ... normalize=True, ... init_diag=True ... ) """ def __init__(self, matching_dim: 'int', channels: 'int'=4, normalize: 'bool'=True, init_diag: 'bool'=True): """:class:`MatchingTensor` constructor.""" super().__init__() self._matching_dim = matching_dim self._channels = channels self._normalize = normalize self._init_diag = init_diag self.interaction_matrix = torch.empty(self._channels, self. _matching_dim, self._matching_dim) if self._init_diag: self.interaction_matrix = self.interaction_matrix.uniform_(- 0.05, 0.05) for channel_index in range(self._channels): self.interaction_matrix[channel_index].fill_diagonal_(0.1) self.interaction_matrix = nn.Parameter(self.interaction_matrix) else: self.interaction_matrix = nn.Parameter(self.interaction_matrix. 
uniform_()) def forward(self, input_0, input_1): primals_1 = self.interaction_matrix primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
zfjsail/MatchZoo-py
MatchingTensor
false
4,700
[ "Apache-2.0" ]
0
c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
https://github.com/zfjsail/MatchZoo-py/tree/c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
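Illustrative note, not part of the dataset record: the generated call() realizes the einsum 'bld,cde,bre->bclr' as two batched matmuls. The hypothetical sketch below shows the same two-step contraction in eager mode with random tensors (nothing from the record is required); small floating-point differences from the reordered reduction are expected.

import torch

x = torch.randn(4, 4, 4)  # (b, l, d)
y = torch.randn(4, 4, 4)  # (b, r, e)
w = torch.randn(4, 4, 4)  # (c, d, e)
ref = torch.einsum('bld,cde,bre->bclr', x, w, y)
# Contract x with the interaction matrix first, then with y.
tmp = torch.einsum('bld,cde->bcle', x, w)
out = torch.einsum('bcle,bre->bclr', tmp, y)
torch.testing.assert_close(ref, out, rtol=1e-4, atol=1e-5)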
AdaptiveAvgPool3dOutSize1
import torch from typing import Tuple import torch.nn as nn from abc import abstractmethod import torch.utils.data import torch.nn class EfficientBlockBase(nn.Module): """ PyTorchVideo/accelerator provides a set of efficient blocks that have optimal efficiency for each target hardware device. Each efficient block has two forms: - original form: this form is for training. When efficient block is instantiated, it is in this original form. - deployable form: this form is for deployment. Once the network is ready for deploy, it can be converted into deployable form for efficient execution on target hardware. One block is transformed into deployable form by calling convert() method. By conversion to deployable form, various optimization (operator fuse, kernel optimization, etc.) are applied. EfficientBlockBase is the base class for efficient blocks. All efficient blocks should inherit this base class and implement following methods: - forward(): same as required by nn.Module - convert(): called to convert block into deployable form """ @abstractmethod def convert(self): pass @abstractmethod def forward(self): pass class AdaptiveAvgPool3dOutSize1(EfficientBlockBase): """ Implements AdaptiveAvgPool3d with output (T, H, W) = (1, 1, 1). This operator has better efficiency than AdaptiveAvgPool for mobile CPU. """ def __init__(self): super().__init__() self.pool = nn.AdaptiveAvgPool3d(1) self.convert_flag = False def convert(self, input_blob_size: 'Tuple', **kwargs): """ Converts AdaptiveAvgPool into AvgPool with constant kernel size for better efficiency. Args: input_blob_size (tuple): blob size at the input of AdaptiveAvgPool3dOutSize1 instance during forward. kwargs (any): any keyword argument (unused). """ assert self.convert_flag is False, 'AdaptiveAvgPool3dOutSize1: already converted, cannot be converted again' kernel_size = input_blob_size[2:] self.pool = nn.AvgPool3d(kernel_size) self.convert_flag = True def forward(self, x): return self.pool(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from typing import Tuple import torch.nn as nn from abc import abstractmethod import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(4)](buf1, arg0_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class EfficientBlockBase(nn.Module): """ PyTorchVideo/accelerator provides a set of efficient blocks that have optimal efficiency for each target hardware device. Each efficient block has two forms: - original form: this form is for training. When efficient block is instantiated, it is in this original form. - deployable form: this form is for deployment. Once the network is ready for deploy, it can be converted into deployable form for efficient execution on target hardware. One block is transformed into deployable form by calling convert() method. By conversion to deployable form, various optimization (operator fuse, kernel optimization, etc.) are applied. EfficientBlockBase is the base class for efficient blocks. All efficient blocks should inherit this base class and implement following methods: - forward(): same as required by nn.Module - convert(): called to convert block into deployable form """ @abstractmethod def convert(self): pass @abstractmethod def forward(self): pass class AdaptiveAvgPool3dOutSize1New(EfficientBlockBase): """ Implements AdaptiveAvgPool3d with output (T, H, W) = (1, 1, 1). This operator has better efficiency than AdaptiveAvgPool for mobile CPU. """ def __init__(self): super().__init__() self.pool = nn.AdaptiveAvgPool3d(1) self.convert_flag = False def convert(self, input_blob_size: 'Tuple', **kwargs): """ Converts AdaptiveAvgPool into AvgPool with constant kernel size for better efficiency. Args: input_blob_size (tuple): blob size at the input of AdaptiveAvgPool3dOutSize1 instance during forward. kwargs (any): any keyword argument (unused). """ assert self.convert_flag is False, 'AdaptiveAvgPool3dOutSize1: already converted, cannot be converted again' kernel_size = input_blob_size[2:] self.pool = nn.AvgPool3d(kernel_size) self.convert_flag = True def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zijian-hu/pytorchvideo
AdaptiveAvgPool3dOutSize1
false
4,701
[ "Apache-2.0" ]
0
51589b100437af2285c56ce2ccc7ccecb7f9b18b
https://github.com/zijian-hu/pytorchvideo/tree/51589b100437af2285c56ce2ccc7ccecb7f9b18b
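Illustrative sketch, not part of the dataset record, assuming the original AdaptiveAvgPool3dOutSize1 class above is in scope: convert() swaps the adaptive pool for a fixed-kernel AvgPool3d sized from the given blob shape, and for that input size the two produce the same (1, 1, 1) output.

import torch

m = AdaptiveAvgPool3dOutSize1()
x = torch.randn(2, 3, 4, 6, 6)            # (B, C, T, H, W)
before = m(x)
m.convert(input_blob_size=x.shape)        # kernel_size becomes (4, 6, 6)
after = m(x)
torch.testing.assert_close(before, after)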
Cat
import torch import torch.nn as nn class Cat(nn.Module): def __init__(self): super(Cat, self).__init__() def forward(self, x): addition = torch.split(x, 2, dim=1)[0] None x = torch.cat([x, addition], dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 6 x0 = xindex % 16 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 6, tl.int64) tmp9 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class CatNew(nn.Module): def __init__(self): super(CatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
Cat
false
4,702
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
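Illustrative sketch, not part of the dataset record, assuming the original Cat class above is in scope: torch.split(x, 2, dim=1)[0] is just the first two channels of x, so the concatenation grows dim 1 from 4 to 6, which is why the generated call() allocates a (4, 6, 4, 4) buffer.

import torch

x = torch.rand(4, 4, 4, 4)
out = Cat()(x)
assert out.shape == (4, 6, 4, 4)
torch.testing.assert_close(out[:, :4], x)         # original channels
torch.testing.assert_close(out[:, 4:], x[:, :2])  # appended copy of the first two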
MaskedTemporalPooling
import torch from typing import Optional import torch.utils.data import torch.nn class MaskedTemporalPooling(torch.nn.Module): """ Applies temporal pooling operations on masked inputs. For each pooling operation all masked values are ignored. """ def __init__(self, method: 'str'): """ method (str): the method of pooling to use. Options: 'max': reduces temporal dimension to each valid max value. 'avg': averages valid values in the temporal dimension. 'sum': sums valid values in the temporal dimension. Note if all batch row elements are invalid, the temporal dimension is pooled to 0 values. """ super().__init__() assert method in ('max', 'avg', 'sum') self._method = method def forward(self, x: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None ) ->torch.Tensor: """ Args: x (torch.Tensor): tensor with shape (batch_size, seq_len, feature_dim) mask (torch.Tensor): bool tensor with shape (batch_size, seq_len). Sequence elements that are False are invalid. Returns: Tensor with shape (batch_size, feature_dim) """ assert x.dim( ) == 3, 'Requires x shape (batch_size x seq_len x feature_dim)' b, t = x.shape[0], x.shape[1] if mask is None: mask = torch.ones((b, t), dtype=torch.bool) if self._method == 'max': x[~mask, :] = float('-inf') invalid_first_dim = ~mask.view(b, -1).any(dim=-1) x[invalid_first_dim, :] = 0 x = torch.max(x, dim=1)[0] elif self._method == 'avg': x = x * mask.unsqueeze(-1).float() mask = mask.view(b, t, -1).any(dim=-1) valid_lengths = mask.float().sum(dim=-1).int() x = x.sum(dim=1) x = x.div(valid_lengths.clamp(min=1).unsqueeze(-1).expand(x. size()).float()) elif self._method == 'sum': x = x * mask.unsqueeze(-1).float() x = x.sum(dim=1) else: raise NotImplementedError( f"{self._method} not available options are: 'max', 'avg', 'sum'" ) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'method': 'max'}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], False, tl.int1) tmp2 = float('-inf') tmp3 = tl.where(tmp1, tmp2, tmp0) tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + x0, xmask) tmp0 = tl.full([1], True, tl.int1) tmp1 = tmp0 | tmp0 tmp2 = tmp1 | tmp0 tmp3 = tmp2 | tmp0 tmp4 = tmp3 == 0 tmp6 = 0.0 tmp7 = tl.where(tmp4, tmp6, tmp5) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_index_put_lift_fresh_0[grid(64)](arg0_1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) triton_poi_fused_index_put_lift_fresh_1[grid(64)](arg0_1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_max_2[grid(16)](arg0_1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf2, class MaskedTemporalPoolingNew(torch.nn.Module): """ Applies temporal pooling operations on masked inputs. For each pooling operation all masked values are ignored. """ def __init__(self, method: 'str'): """ method (str): the method of pooling to use. Options: 'max': reduces temporal dimension to each valid max value. 'avg': averages valid values in the temporal dimension. 'sum': sums valid values in the temporal dimension. Note if all batch row elements are invalid, the temporal dimension is pooled to 0 values. """ super().__init__() assert method in ('max', 'avg', 'sum') self._method = method def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zijian-hu/pytorchvideo
MaskedTemporalPooling
false
4,703
[ "Apache-2.0" ]
0
51589b100437af2285c56ce2ccc7ccecb7f9b18b
https://github.com/zijian-hu/pytorchvideo/tree/51589b100437af2285c56ce2ccc7ccecb7f9b18b
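Illustrative usage sketch, not part of the dataset record, assuming the original MaskedTemporalPooling class above is in scope: masked-out time steps are excluded from the pooling. Note the 'max' branch writes -inf into masked positions of x in place, so pass a copy if the input is reused.

import torch

pool = MaskedTemporalPooling(method="max")
x = torch.tensor([[[1.0], [5.0], [3.0]]])   # (batch=1, seq_len=3, feature_dim=1)
mask = torch.tensor([[True, False, True]])  # the 5.0 step is invalid
out = pool(x.clone(), mask)
torch.testing.assert_close(out, torch.tensor([[3.0]]))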
Add
import torch import torch.nn as nn class Add(nn.Module): def __init__(self): super(Add, self).__init__() def forward(self, x): x = torch.add(x, 20) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class AddNew(nn.Module): def __init__(self): super(AddNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
Add
false
4,704
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
SemanticComposite
import torch import torch.nn as nn class SemanticComposite(nn.Module): """ SemanticComposite module. Apply a self-attention layer and a semantic composite fuse gate to compute the encoding result of one tensor. :param in_features: Feature size of input. :param dropout_rate: The dropout rate. Examples: >>> import torch >>> module = SemanticComposite(in_features=10) >>> x = torch.randn(4, 5, 10) >>> x.shape torch.Size([4, 5, 10]) >>> module(x).shape torch.Size([4, 5, 10]) """ def __init__(self, in_features, dropout_rate: 'float'=0.0): """Init.""" super().__init__() self.att_linear = nn.Linear(3 * in_features, 1, False) self.z_gate = nn.Linear(2 * in_features, in_features, True) self.r_gate = nn.Linear(2 * in_features, in_features, True) self.f_gate = nn.Linear(2 * in_features, in_features, True) self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): """Forward.""" seq_length = x.shape[1] x_1 = x.unsqueeze(dim=2).repeat(1, 1, seq_length, 1) x_2 = x.unsqueeze(dim=1).repeat(1, seq_length, 1, 1) x_concat = torch.cat([x_1, x_2, x_1 * x_2], dim=-1) x_concat = self.dropout(x_concat) attn_matrix = self.att_linear(x_concat).squeeze(dim=-1) attn_weight = torch.softmax(attn_matrix, dim=2) attn = torch.bmm(attn_weight, x) x_attn_concat = self.dropout(torch.cat([x, attn], dim=-1)) x_attn_concat = torch.cat([x, attn], dim=-1) z = torch.tanh(self.z_gate(x_attn_concat)) r = torch.sigmoid(self.r_gate(x_attn_concat)) f = torch.sigmoid(self.f_gate(x_attn_concat)) encoding = r * x + f * z return encoding def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x4 = xindex // 48 x1 = xindex // 12 % 4 x3 = xindex // 192 x5 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 * x1 + 16 * x3 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr0 + (4 * x4 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr0 + (4 * x1 + 16 * x3 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 * tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp11, tmp16, tmp17) tmp19 = tl.where(tmp9, tmp10, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x5, tmp20, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp7 = libdevice.tanh(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tmp3 + tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 12), (12, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(768)](primals_1, buf0, 768, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 12), (12, 1), 0), reinterpret_tensor(primals_2, (12, 1), (1, 12), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 extern_kernels.bmm(buf3, primals_1, out=buf4) buf5 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](primals_1, buf4, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0) del buf4 extern_kernels.addmm(primals_4, reinterpret_tensor(buf5, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf6) del primals_4 buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf5, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf7) del primals_6 buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf8) del primals_8 buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_tanh_4[grid(64)](buf7, primals_1, buf8, buf6, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf9, primals_1, 
reinterpret_tensor(buf0, (64, 12), (12, 1), 0 ), buf3, reinterpret_tensor(buf5, (16, 8), (8, 1), 0 ), buf6, buf7, buf8, primals_7, primals_5, primals_3 class SemanticCompositeNew(nn.Module): """ SemanticComposite module. Apply a self-attention layer and a semantic composite fuse gate to compute the encoding result of one tensor. :param in_features: Feature size of input. :param dropout_rate: The dropout rate. Examples: >>> import torch >>> module = SemanticComposite(in_features=10) >>> x = torch.randn(4, 5, 10) >>> x.shape torch.Size([4, 5, 10]) >>> module(x).shape torch.Size([4, 5, 10]) """ def __init__(self, in_features, dropout_rate: 'float'=0.0): """Init.""" super().__init__() self.att_linear = nn.Linear(3 * in_features, 1, False) self.z_gate = nn.Linear(2 * in_features, in_features, True) self.r_gate = nn.Linear(2 * in_features, in_features, True) self.f_gate = nn.Linear(2 * in_features, in_features, True) self.dropout = nn.Dropout(p=dropout_rate) def forward(self, input_0): primals_2 = self.att_linear.weight primals_3 = self.z_gate.weight primals_4 = self.z_gate.bias primals_5 = self.r_gate.weight primals_6 = self.r_gate.bias primals_7 = self.f_gate.weight primals_8 = self.f_gate.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
zfjsail/MatchZoo-py
SemanticComposite
false
4,705
[ "Apache-2.0" ]
0
c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
https://github.com/zfjsail/MatchZoo-py/tree/c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
Pow
import torch import torch.nn as nn class Pow(nn.Module): def __init__(self): super(Pow, self).__init__() def forward(self, x): x = torch.pow(x, 2) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PowNew(nn.Module): def __init__(self): super(PowNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
Pow
false
4,706
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
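Illustrative note, not part of the dataset record: the generated kernel lowers torch.pow(x, 2) to a plain self-multiplication (tmp0 * tmp0), which agrees with the eager result up to floating-point rounding:

import torch

x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(torch.pow(x, 2), x * x)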
Div
import torch import torch.nn as nn class Div(nn.Module): def __init__(self): super(Div, self).__init__() def forward(self, x): x = torch.div(x, 0.5) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class DivNew(nn.Module): def __init__(self): super(DivNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
Div
false
4,707
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
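The Pow and Div records above both show the same strength reduction in the generated kernels: torch.pow(x, 2) is lowered to x * x and torch.div(x, 0.5) to x * 2.0 (multiplication by the reciprocal). A minimal CPU-only sketch of those identities, assuming nothing beyond torch itself:

import torch

x = torch.rand(4, 4, 4, 4)
# pow with exponent 2 is just a self-multiply
assert torch.allclose(torch.pow(x, 2), x * x)
# division by a compile-time constant becomes multiplication by its reciprocal
assert torch.allclose(torch.div(x, 0.5), x * 2.0)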
MatchModule
import torch import torch.nn as nn import torch.nn.functional as F class MatchModule(nn.Module): """ Computing the match representation for Match LSTM. :param hidden_size: Size of hidden vectors. :param dropout_rate: Dropout rate of the projection layer. Defaults to 0. Examples: >>> import torch >>> attention = MatchModule(hidden_size=10) >>> v1 = torch.randn(4, 5, 10) >>> v1.shape torch.Size([4, 5, 10]) >>> v2 = torch.randn(4, 5, 10) >>> v2_mask = torch.ones(4, 5).to(dtype=torch.uint8) >>> attention(v1, v2, v2_mask).shape torch.Size([4, 5, 20]) """ def __init__(self, hidden_size, dropout_rate=0): """Init.""" super().__init__() self.v2_proj = nn.Linear(hidden_size, hidden_size) self.proj = nn.Linear(hidden_size * 4, hidden_size * 2) self.dropout = nn.Dropout(p=dropout_rate) def forward(self, v1, v2, v2_mask): """Computing attention vectors and projection vectors.""" proj_v2 = self.v2_proj(v2) similarity_matrix = v1.bmm(proj_v2.transpose(2, 1).contiguous()) v1_v2_attn = F.softmax(similarity_matrix.masked_fill(v2_mask. unsqueeze(1).bool(), -1e-07), dim=2) v2_wsum = v1_v2_attn.bmm(v2) fusion = torch.cat([v1, v2_wsum, v1 - v2_wsum, v1 * v2_wsum], dim=2) match = self.dropout(F.relu(self.proj(fusion))) return match def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 != 0 tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp5 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1.0000000116860974e-07 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp6 = tl.where(tmp4, tmp2, tmp5) tmp7 = triton_helpers.maximum(tmp3, tmp6) tmp10 = tl.where(tmp8, tmp2, tmp9) tmp11 = triton_helpers.maximum(tmp7, tmp10) tmp14 = tl.where(tmp12, tmp2, tmp13) tmp15 = triton_helpers.maximum(tmp11, tmp14) tmp16 = tmp3 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp6 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp10 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex x4 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp4 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = -1.0000000116860974e-07 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 
tl.store(in_out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 - tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 * tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 16), (16, 1)) assert_size_stride(primals_7, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0) del buf0 get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_4, buf1, out=buf2) buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.bool) triton_poi_fused__to_copy_1[grid(16)](primals_5, buf3, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(16)](buf3, buf2, 
buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf2 del buf2 triton_poi_fused__softmax_masked_fill_3[grid(64)](buf6, buf3, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 del buf5 buf7 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(buf6, primals_3, out=buf7) buf8 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_cat_4[grid(256)](primals_4, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 8), (1, 16), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 8), (32, 8, 1), 0) del buf9 buf11 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(128)](buf10, primals_7, buf11, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf10, primals_3, primals_4, buf3, buf6, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), buf11, primals_6 class MatchModuleNew(nn.Module): """ Computing the match representation for Match LSTM. :param hidden_size: Size of hidden vectors. :param dropout_rate: Dropout rate of the projection layer. Defaults to 0. Examples: >>> import torch >>> attention = MatchModule(hidden_size=10) >>> v1 = torch.randn(4, 5, 10) >>> v1.shape torch.Size([4, 5, 10]) >>> v2 = torch.randn(4, 5, 10) >>> v2_mask = torch.ones(4, 5).to(dtype=torch.uint8) >>> attention(v1, v2, v2_mask).shape torch.Size([4, 5, 20]) """ def __init__(self, hidden_size, dropout_rate=0): """Init.""" super().__init__() self.v2_proj = nn.Linear(hidden_size, hidden_size) self.proj = nn.Linear(hidden_size * 4, hidden_size * 2) self.dropout = nn.Dropout(p=dropout_rate) def forward(self, input_0, input_1, input_2): primals_1 = self.v2_proj.weight primals_2 = self.v2_proj.bias primals_6 = self.proj.weight primals_7 = self.proj.bias primals_3 = input_0 primals_4 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
zfjsail/MatchZoo-py
MatchModule
false
4,708
[ "Apache-2.0" ]
0
c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
https://github.com/zfjsail/MatchZoo-py/tree/c93e52e7db7e257b46bb8bf8df8ce1ab1944e2f2
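The masked-fill kernels in the MatchModule record above use the literal -1.0000000116860974e-07 where the eager code writes -1e-07; that appears to be nothing more than the Python constant after a round trip through float32, printed at full double precision. A one-line check of that reading, assuming only torch:

import torch

print(torch.tensor(-1e-07, dtype=torch.float32).item())  # -1.0000000116860974e-07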
Net
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=3) self.conv2 = nn.Conv2d(10, 20, kernel_size=3) self.conv3 = nn.Conv2d(20, 50, kernel_size=3) self.conv4 = nn.Conv2d(50, 2, kernel_size=1, bias=False, padding=0, stride=1) self.max_pool2d = nn.MaxPool2d((4, 4)) self.softmax = nn.Softmax(dim=1) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2(x), 2)) x = self.conv3(x) x = self.conv4(x) x = self.max_pool2d(x) x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 153760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 38440 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x3 = xindex // 31 x2 = xindex // 9610 x4 = xindex % 9610 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x4 + 9728 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 67280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 841 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 15680 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 % 14 x4 = xindex // 196 x3 = xindex // 3920 x5 = xindex % 3920 x6 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x4), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) 
tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x5 + 3968 * x3), tmp15, xmask) tl.store(out_ptr1 + x6, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 144 % 50 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 72 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 48 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0 + 48 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0 + 48 * x1), xmask, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0 + 48 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (12 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (13 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (14 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (15 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (24 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (25 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (26 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (27 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (36 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (37 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (38 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (39 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp31 = tmp1 > tmp0 tmp32 = tl.full([1], 1, tl.int8) tmp33 = tl.full([1], 0, tl.int8) tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp3 > tmp2 tmp36 = tl.full([1], 2, tl.int8) tmp37 = tl.where(tmp35, tmp36, tmp34) tmp38 = tmp5 > tmp4 tmp39 = tl.full([1], 3, tl.int8) tmp40 = tl.where(tmp38, tmp39, tmp37) tmp41 = tmp7 
> tmp6 tmp42 = tl.full([1], 4, tl.int8) tmp43 = tl.where(tmp41, tmp42, tmp40) tmp44 = tmp9 > tmp8 tmp45 = tl.full([1], 5, tl.int8) tmp46 = tl.where(tmp44, tmp45, tmp43) tmp47 = tmp11 > tmp10 tmp48 = tl.full([1], 6, tl.int8) tmp49 = tl.where(tmp47, tmp48, tmp46) tmp50 = tmp13 > tmp12 tmp51 = tl.full([1], 7, tl.int8) tmp52 = tl.where(tmp50, tmp51, tmp49) tmp53 = tmp15 > tmp14 tmp54 = tl.full([1], 8, tl.int8) tmp55 = tl.where(tmp53, tmp54, tmp52) tmp56 = tmp17 > tmp16 tmp57 = tl.full([1], 9, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp19 > tmp18 tmp60 = tl.full([1], 10, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp21 > tmp20 tmp63 = tl.full([1], 11, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp23 > tmp22 tmp66 = tl.full([1], 12, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp25 > tmp24 tmp69 = tl.full([1], 13, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp27 > tmp26 tmp72 = tl.full([1], 14, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp29 > tmp28 tmp75 = tl.full([1], 15, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + x2, tmp30, xmask) tl.store(out_ptr1 + x2, tmp76, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 72 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 9 x2 = xindex // 18 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 18 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (9 + x0 + 18 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x3, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (10, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (50, 20, 3, 3), (180, 9, 3, 1)) assert_size_stride(primals_7, (50,), (1,)) assert_size_stride(primals_8, (2, 50, 1, 1), (50, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 62, 62), (38440, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(153760)](buf1, primals_2, 153760, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 10, 31, 31), (9728, 961, 31, 1), torch.int8) buf3 = empty_strided_cuda((4, 10, 31, 31), (9610, 961, 31, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_1[grid(38440)](buf1, buf2, buf3, 38440, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 20, 29, 29), (16820, 841, 29, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(67280)](buf5, primals_5, 67280, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = 
empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1), torch.int8) buf7 = empty_strided_cuda((4, 20, 14, 14), (3920, 196, 14, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_3[grid(15680)](buf5, buf6, buf7, 15680, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 50, 12, 12), (7200, 144, 12, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(28800)](buf9, primals_7, 28800, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 2, 12, 12), (288, 144, 12, 1)) buf11 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32) buf12 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(72)](buf10, buf11, buf12, 72, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32) triton_poi_fused__softmax_6[grid(72)](buf11, buf13, 72, XBLOCK=128, num_warps=4, num_stages=1) del buf11 return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf12, buf13) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=3) self.conv2 = nn.Conv2d(10, 20, kernel_size=3) self.conv3 = nn.Conv2d(20, 50, kernel_size=3) self.conv4 = nn.Conv2d(50, 2, kernel_size=1, bias=False, padding=0, stride=1) self.max_pool2d = nn.MaxPool2d((4, 4)) self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
ygnn123/training_extensions
Net
false
4,709
[ "Apache-2.0" ]
0
c3aeba9359b0d4e0ef9c054de777d3ec081a9892
https://github.com/ygnn123/training_extensions/tree/c3aeba9359b0d4e0ef9c054de777d3ec081a9892
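The assert_size_stride calls in the Net record above pin down intermediate spatial sizes (62, 31, 29, 14, 12 and finally 3) that follow from the usual unpadded conv/pool size rule out = floor((in - kernel) / stride) + 1. A small sketch reproducing that chain for the 64x64 input, assuming that formula:

def conv_out(size, kernel, stride=1):
    # output spatial size of an unpadded convolution or pooling window
    return (size - kernel) // stride + 1

s = 64
s = conv_out(s, 3)      # conv1 (k=3): 62
s = conv_out(s, 2, 2)   # max_pool2d(2): 31
s = conv_out(s, 3)      # conv2 (k=3): 29
s = conv_out(s, 2, 2)   # max_pool2d(2): 14
s = conv_out(s, 3)      # conv3 (k=3): 12
s = conv_out(s, 1)      # conv4 (1x1): 12
s = conv_out(s, 4, 4)   # MaxPool2d((4, 4)): 3
print(s)                # 3, matching the (4, 2, 3, 3) buffers before the softmax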
Hardtanh
import torch import torch.nn as nn class Hardtanh(nn.Module): def __init__(self): super(Hardtanh, self).__init__() self.layer = nn.Hardtanh(-2, 2) def forward(self, x): x = self.layer(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -2.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class HardtanhNew(nn.Module): def __init__(self): super(HardtanhNew, self).__init__() self.layer = nn.Hardtanh(-2, 2) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
Hardtanh
false
4,710
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
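The Hardtanh record above lowers nn.Hardtanh(-2, 2) to a maximum/minimum pair, i.e. an ordinary clamp. A quick CPU equivalence check, assuming only torch:

import torch
import torch.nn as nn

x = torch.randn(4, 4, 4, 4) * 4   # spread values outside [-2, 2] so both bounds are exercised
assert torch.equal(nn.Hardtanh(-2, 2)(x), x.clamp(-2, 2))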
AdaptiveMaxPool2d
import torch import torch.nn as nn class AdaptiveMaxPool2d(nn.Module): def __init__(self): super(AdaptiveMaxPool2d, self).__init__() self.layer = nn.AdaptiveMaxPool2d((5, 7)) def forward(self, x): x = self.layer(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 560 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 5 x0 = xindex % 7 x2 = xindex // 35 x4 = xindex tmp0 = 4 * x1 // 5 tmp1 = (8 + 4 * x1) // 5 tmp2 = tmp0 < tmp1 tmp3 = 4 * x0 // 7 tmp4 = (10 + 4 * x0) // 7 tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 7), tmp6 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp8 = 1 + 4 * x0 // 7 tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 7), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1 + 4 * x1 // 5 tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 7), tmp15 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp17 = triton_helpers.maximum(tmp16, tmp12) tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 7), tmp18 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp20 = triton_helpers.maximum(tmp19, tmp17) tl.store(out_ptr0 + x4, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 7), (140, 35, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0[grid(560)](arg0_1, buf0, 560, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class AdaptiveMaxPool2dNew(nn.Module): def __init__(self): super(AdaptiveMaxPool2dNew, self).__init__() self.layer = nn.AdaptiveMaxPool2d((5, 7)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
AdaptiveMaxPool2d
false
4,711
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
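The index expressions 4 * x1 // 5 and (8 + 4 * x1) // 5 in the AdaptiveMaxPool2d kernel above are the usual adaptive-pooling bin bounds start = floor(i * in / out) and end = ceil((i + 1) * in / out), specialised to an input size of 4 and output sizes of 5 and 7. A short sketch of that index math, assuming those formulas:

def adaptive_bins(in_size, out_size):
    # (start, end) input range covered by each output position
    return [(i * in_size // out_size, -(-((i + 1) * in_size) // out_size))
            for i in range(out_size)]

print(adaptive_bins(4, 5))  # rows: starts match 4*x1//5, ends match (8 + 4*x1)//5
print(adaptive_bins(4, 7))  # cols: starts match 4*x0//7, ends match (10 + 4*x0)//7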
CustomClassificationHead
from _paritybench_helpers import _mock_config import torch from torch import nn class CustomClassificationHead(nn.Module): def __init__(self, config, input_dim, n_labels): super().__init__() self.config = config self.fc1 = nn.Linear(input_dim, 4096) self.fc2 = nn.Linear(4096, 2048) self.fc3 = nn.Linear(2048, 1024) self.fc4 = nn.Linear(1024, n_labels) self.dropout = nn.Dropout(p=0.3) self.prelu1 = nn.PReLU() self.prelu2 = nn.PReLU() self.prelu3 = nn.PReLU() nn.init.kaiming_normal_(self.fc1.weight) nn.init.kaiming_normal_(self.fc2.weight) nn.init.kaiming_normal_(self.fc3.weight) nn.init.kaiming_normal_(self.fc4.weight) def forward(self, x): if len(x.size()) == 3: x = x[:, 0, :] x = self.prelu1(self.fc1(x)) x = self.prelu2(self.fc2(self.dropout(x))) x = self.prelu3(self.fc3(self.dropout(x))) return self.fc4(self.dropout(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(), 'input_dim': 4, 'n_labels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__prelu_kernel_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, None) @triton.jit def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, None) @triton.jit def triton_poi_fused__prelu_kernel_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4096, 4), (4, 1)) assert_size_stride(primals_3, (4096,), (1,)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (2048, 4096), (4096, 1)) assert_size_stride(primals_6, (2048,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1024, 2048), (2048, 1)) assert_size_stride(primals_9, (1024,), (1,)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (4, 1024), (1024, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4096), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_0[grid(262144)](buf0, primals_4, buf1, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0), reinterpret_tensor(primals_5, (4096, 2048), (1, 4096), 0), alpha=1, beta=1, out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32) triton_poi_fused__prelu_kernel_1[grid(131072)](buf2, primals_7, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf4 = 
empty_strided_cuda((64, 1024), (1024, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_8, (2048, 1024), (1, 2048), 0), alpha=1, beta=1, out=buf4) del primals_9 buf5 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.float32) triton_poi_fused__prelu_kernel_2[grid(65536)](buf4, primals_10, buf5, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_12, reinterpret_tensor(buf5, (64, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_11, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf6) del primals_12 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_4, primals_7, primals_10, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4096), ( 4096, 1), 0), buf2, reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 1024), (1024, 1), 0 ), primals_11, primals_8, primals_5 class CustomClassificationHeadNew(nn.Module): def __init__(self, config, input_dim, n_labels): super().__init__() self.config = config self.fc1 = nn.Linear(input_dim, 4096) self.fc2 = nn.Linear(4096, 2048) self.fc3 = nn.Linear(2048, 1024) self.fc4 = nn.Linear(1024, n_labels) self.dropout = nn.Dropout(p=0.3) self.prelu1 = nn.PReLU() self.prelu2 = nn.PReLU() self.prelu3 = nn.PReLU() nn.init.kaiming_normal_(self.fc1.weight) nn.init.kaiming_normal_(self.fc2.weight) nn.init.kaiming_normal_(self.fc3.weight) nn.init.kaiming_normal_(self.fc4.weight) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_11 = self.fc4.weight primals_12 = self.fc4.bias primals_4 = self.prelu1.weight primals_7 = self.prelu2.weight primals_10 = self.prelu3.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
y-kamiya/emotion-classification
CustomClassificationHead
false
4,712
[ "MIT" ]
0
8d5b6ab4aafd60607260dc87e5360c04bf149e18
https://github.com/y-kamiya/emotion-classification/tree/8d5b6ab4aafd60607260dc87e5360c04bf149e18
TransposeMultiheadAttention
import torch import torch.nn as nn from typing import Optional import torch.utils.data import torch.nn class TransposeMultiheadAttention(nn.Module): """ Wrapper for nn.MultiheadAttention which first transposes the input tensor from (batch_size, seq_len, feature_dim) to (seq_length, batch_size, feature_dim), then applies the attention and transposes the attention outputs back to the input shape. """ def __init__(self, feature_dim: 'int', num_heads: 'int'=1): """ Args: feature_dim (int): attention embedding dimension num_heads (int): number of attention heads """ super().__init__() self._attention = nn.MultiheadAttention(embed_dim=feature_dim, num_heads=num_heads) self._attention_weights = None @property def attention_weights(self) ->Optional[torch.Tensor]: """ Contains attention weights from last forward call. """ return self._attention_weights def forward(self, x: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None ) ->torch.Tensor: """ Args: x (torch.Tensor): tensor of shape (batch_size, seq_len, feature_dim) mask (torch.Tensor): bool tensor with shape (batch_size, seq_len). Sequence elements that are False are invalid. Returns: Tensor with shape (batch_size, seq_len, feature_dim) """ assert x.dim( ) == 3, 'Requires x shape (batch_size x seq_len x feature_dim)' if mask is not None: mask[:, 0] = True mask = ~mask x = x.transpose(0, 1) attn_output, self._attention_weights = self._attention(x, x, x, key_padding_mask=mask) attn_output = attn_output.transpose(0, 1) return attn_output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'feature_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from typing import Optional import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_mean_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = 1.0 tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12,), (1,)) 
assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(192)](buf1, primals_2, buf2, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32) triton_poi_fused_mul_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 64), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_mean_4[grid(64)](buf5, buf6, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 128), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_5 return reinterpret_tensor(buf9, (4, 4, 4), (4, 16, 1), 0 ), buf10, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_4, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 128 ), reinterpret_tensor(buf3, (4, 4, 4), (4, 1, 16), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 64) class TransposeMultiheadAttentionNew(nn.Module): """ Wrapper for nn.MultiheadAttention which first transposes the input tensor from (batch_size, seq_len, feature_dim) to (seq_length, batch_size, feature_dim), then applies the attention and transposes the attention outputs back to the input shape. """ def __init__(self, feature_dim: 'int', num_heads: 'int'=1): """ Args: feature_dim (int): attention embedding dimension num_heads (int): number of attention heads """ super().__init__() self._attention = nn.MultiheadAttention(embed_dim=feature_dim, num_heads=num_heads) self._attention_weights = None @property def attention_weights(self) ->Optional[torch.Tensor]: """ Contains attention weights from last forward call. """ return self._attention_weights def forward(self, input_0): primals_3 = self._attention.in_proj_weight primals_2 = self._attention.in_proj_bias primals_4 = self._attention.out_proj.weight primals_5 = self._attention.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
zijian-hu/pytorchvideo
TransposeMultiheadAttention
false
4,713
[ "Apache-2.0" ]
0
51589b100437af2285c56ce2ccc7ccecb7f9b18b
https://github.com/zijian-hu/pytorchvideo/tree/51589b100437af2285c56ce2ccc7ccecb7f9b18b
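In the TransposeMultiheadAttention record above, triton_poi_fused_mul_2 multiplies the projected queries by the literal 0.5; with embed_dim 4 and a single head that is the standard scaled-dot-product factor 1 / sqrt(head_dim). A tiny sketch of where the constant comes from, assuming that convention:

import math

embed_dim, num_heads = 4, 1
head_dim = embed_dim // num_heads
print(1.0 / math.sqrt(head_dim))  # 0.5, the tmp1 constant in the fused mul kernel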
Interpolate
import torch import torch.nn as nn import torch.nn.functional as F class Interpolate(nn.Module): def __init__(self): super(Interpolate, self).__init__() def forward(self, x): x = F.interpolate(x, scale_factor=8, mode='nearest', align_corners=None ) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x2 = xindex // 1024 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.125 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(16384)](arg0_1, buf0, 16384, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class InterpolateNew(nn.Module): def __init__(self): super(InterpolateNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
Interpolate
false
4,714
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
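The Interpolate kernel above implements mode='nearest' with scale_factor=8 by mapping each output coordinate i back to the source index int(i * 0.125), i.e. floor(i / 8). A CPU-only sketch that reproduces the same gather, assuming only torch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
up = F.interpolate(x, scale_factor=8, mode='nearest')
idx = (torch.arange(32) * 0.125).long()     # 32 output positions -> source index floor(i / 8)
manual = x[:, :, idx][:, :, :, idx]         # gather rows, then columns, with the same index map
assert torch.equal(up, manual)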
PReLU
import torch import torch.nn as nn class PReLU(nn.Module): def __init__(self): super(PReLU, self).__init__() self.layer = nn.PReLU() def forward(self, x): x = self.layer(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class PReLUNew(nn.Module): def __init__(self): super(PReLUNew, self).__init__() self.layer = nn.PReLU() def forward(self, input_0): primals_1 = self.layer.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
yifanpu001/PytorchToCaffe
PReLU
false
4,715
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
leakyrelu
import torch import torch.nn as nn class leakyrelu(nn.Module): def __init__(self, layer=10, channels=32): super(leakyrelu, self).__init__() layers = [] for i in range(layer): layers.append(nn.LeakyReLU(inplace=True)) self.layers = nn.Sequential(*layers) def forward(self, x): return self.layers(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr3, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tmp7 = tmp5 * tmp3 tmp8 = tl.where(tmp6, tmp5, tmp7) tmp9 = tmp8 > tmp1 tmp10 = tmp8 * tmp3 tmp11 = tl.where(tmp9, tmp8, tmp10) tmp12 = tmp11 > tmp1 tmp13 = tmp11 * tmp3 tmp14 = tl.where(tmp12, tmp11, tmp13) tmp15 = tmp14 > tmp1 tmp16 = tmp14 * tmp3 tmp17 = tl.where(tmp15, tmp14, tmp16) tmp18 = tmp17 > tmp1 tmp19 = tmp17 * tmp3 tmp20 = tl.where(tmp18, tmp17, tmp19) tmp21 = tmp20 > tmp1 tmp22 = tmp20 * tmp3 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = tmp23 > tmp1 tmp25 = tmp23 * tmp3 tmp26 = tl.where(tmp24, tmp23, tmp25) tmp27 = tmp26 > tmp1 tmp28 = tmp26 * tmp3 tmp29 = tl.where(tmp27, tmp26, tmp28) tmp30 = tmp29 > tmp1 tmp31 = tmp29 * tmp3 tmp32 = tl.where(tmp30, tmp29, tmp31) tl.store(out_ptr3 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) return arg0_1, class leakyreluNew(nn.Module): def __init__(self, layer=10, channels=32): super(leakyreluNew, self).__init__() layers = [] for i in range(layer): layers.append(nn.LeakyReLU(inplace=True)) self.layers = nn.Sequential(*layers) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
leakyrelu
false
4,716
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
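The fused kernel in the leakyrelu record above inlines all ten LeakyReLU layers into a single pass: positive values are untouched and negative values pick up a factor of 0.01 per layer, 0.01**10 overall. A small CPU check of that equivalence, assuming only torch:

import torch
import torch.nn as nn

x = torch.randn(4, 4, 4, 4)
stacked = nn.Sequential(*[nn.LeakyReLU() for _ in range(10)])
expected = torch.where(x > 0, x, x * 0.01 ** 10)
assert torch.allclose(stacked(x), expected)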
MaxPool2d
import torch import torch.nn as nn class MaxPool2d(nn.Module): def __init__(self): super(MaxPool2d, self).__init__() self.layer = nn.MaxPool2d(3, stride=2) def forward(self, x): x = self.layer(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + x0, tmp16, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxPool2dNew(nn.Module): def __init__(self): super(MaxPool2dNew, self).__init__() self.layer = nn.MaxPool2d(3, stride=2) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
MaxPool2d
false
4,717
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
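With a 4x4 input, nn.MaxPool2d(3, stride=2) in the record above fits exactly one 3x3 window per channel, which is why the kernel reads nine elements and writes a (4, 4, 1, 1) buffer. The same result in eager PyTorch, as a quick CPU check assuming only torch:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
pooled = nn.MaxPool2d(3, stride=2)(x)
assert pooled.shape == (4, 4, 1, 1)
assert torch.equal(pooled, x[:, :, :3, :3].amax(dim=(2, 3), keepdim=True))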
PetarVGAT
import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F class BaseModel(nn.Module): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" pass @classmethod def build_model_from_args(cls, args): """Build a new model instance.""" raise NotImplementedError( 'Models must implement the build_model_from_args method') class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) N = h.size()[0] a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1) ], dim=1).view(N, -1, 2 * self.out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class PetarVGAT(BaseModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--num-features', type=int) parser.add_argument('--num-classes', type=int) parser.add_argument('--hidden-size', type=int, default=8) parser.add_argument('--dropout', type=float, default=0.6) parser.add_argument('--alpha', type=float, default=0.2) parser.add_argument('--nheads', type=int, default=8) @classmethod def build_model_from_args(cls, args): return cls(args.num_features, args.hidden_size, args.num_classes, args.dropout, args.alpha, args.nheads) def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads): """Dense version of GAT.""" super(PetarVGAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout= dropout, alpha=alpha, concat=False) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout, training=self.training) x = F.elu(self.out_att(x, adj)) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + (4 * x1 + x0) % 16 % 4), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp40 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp41 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp51 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp52 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) 
tmp57 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp58 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp74 = tl.load(in_ptr5 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp75 = tl.load(in_ptr6 + 4 * x0, xmask, eviction_policy='evict_last') tmp79 = tl.load(in_ptr5 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp80 = tl.load(in_ptr6 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp85 = tl.load(in_ptr5 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp86 = tl.load(in_ptr6 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp91 = tl.load(in_ptr5 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp92 = tl.load(in_ptr6 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp108 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp109 = tl.load(in_ptr8 + 4 * x0, xmask, eviction_policy='evict_last') tmp113 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp114 = tl.load(in_ptr8 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp119 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp120 = tl.load(in_ptr8 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp125 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp126 = tl.load(in_ptr8 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tmp42 = tmp41 * tmp3 tmp43 = tl.where(tmp40, tmp41, tmp42) tmp44 = tl.where(tmp0, tmp43, tmp6) tmp47 = tmp46 * tmp3 tmp48 = tl.where(tmp45, tmp46, tmp47) tmp49 = tl.where(tmp8, tmp48, tmp6) tmp50 = triton_helpers.maximum(tmp44, tmp49) tmp53 = tmp52 * tmp3 tmp54 = tl.where(tmp51, tmp52, tmp53) tmp55 = tl.where(tmp15, tmp54, tmp6) tmp56 = triton_helpers.maximum(tmp50, tmp55) tmp59 = tmp58 * tmp3 tmp60 = tl.where(tmp57, tmp58, tmp59) tmp61 = tl.where(tmp22, tmp60, tmp6) tmp62 = triton_helpers.maximum(tmp56, tmp61) tmp63 = tmp44 - tmp62 tmp64 = tl_math.exp(tmp63) tmp65 = tmp49 - tmp62 tmp66 = tl_math.exp(tmp65) tmp67 = tmp64 + tmp66 tmp68 = tmp55 - tmp62 tmp69 = tl_math.exp(tmp68) tmp70 = tmp67 + tmp69 tmp71 = tmp61 - tmp62 tmp72 = tl_math.exp(tmp71) tmp73 = tmp70 + tmp72 tmp76 = tmp75 * tmp3 tmp77 = tl.where(tmp74, tmp75, tmp76) tmp78 = tl.where(tmp0, tmp77, tmp6) tmp81 = tmp80 * tmp3 tmp82 = tl.where(tmp79, tmp80, tmp81) tmp83 = tl.where(tmp8, tmp82, tmp6) tmp84 = triton_helpers.maximum(tmp78, tmp83) tmp87 = tmp86 * tmp3 tmp88 = tl.where(tmp85, tmp86, tmp87) tmp89 = tl.where(tmp15, tmp88, tmp6) tmp90 = triton_helpers.maximum(tmp84, tmp89) tmp93 = tmp92 * tmp3 tmp94 = tl.where(tmp91, tmp92, tmp93) tmp95 = tl.where(tmp22, tmp94, tmp6) 
tmp96 = triton_helpers.maximum(tmp90, tmp95) tmp97 = tmp78 - tmp96 tmp98 = tl_math.exp(tmp97) tmp99 = tmp83 - tmp96 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp89 - tmp96 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp95 - tmp96 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp110 = tmp109 * tmp3 tmp111 = tl.where(tmp108, tmp109, tmp110) tmp112 = tl.where(tmp0, tmp111, tmp6) tmp115 = tmp114 * tmp3 tmp116 = tl.where(tmp113, tmp114, tmp115) tmp117 = tl.where(tmp8, tmp116, tmp6) tmp118 = triton_helpers.maximum(tmp112, tmp117) tmp121 = tmp120 * tmp3 tmp122 = tl.where(tmp119, tmp120, tmp121) tmp123 = tl.where(tmp15, tmp122, tmp6) tmp124 = triton_helpers.maximum(tmp118, tmp123) tmp127 = tmp126 * tmp3 tmp128 = tl.where(tmp125, tmp126, tmp127) tmp129 = tl.where(tmp22, tmp128, tmp6) tmp130 = triton_helpers.maximum(tmp124, tmp129) tmp131 = tmp112 - tmp130 tmp132 = tl_math.exp(tmp131) tmp133 = tmp117 - tmp130 tmp134 = tl_math.exp(tmp133) tmp135 = tmp132 + tmp134 tmp136 = tmp123 - tmp130 tmp137 = tl_math.exp(tmp136) tmp138 = tmp135 + tmp137 tmp139 = tmp129 - tmp130 tmp140 = tl_math.exp(tmp139) tmp141 = tmp138 + tmp140 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp39, xmask) tl.store(out_ptr2 + x0, tmp62, xmask) tl.store(out_ptr3 + x0, tmp73, xmask) tl.store(out_ptr4 + x0, tmp96, xmask) tl.store(out_ptr5 + x0, tmp107, xmask) tl.store(out_ptr6 + x0, tmp130, xmask) tl.store(out_ptr7 + x0, tmp141, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_out_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x2, xmask).to(tl.int1) tmp14 = tl.load(in_out_ptr1 + x2, xmask) tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr7 + x2, xmask).to(tl.int1) tmp24 = tl.load(in_out_ptr2 + x2, xmask) tmp28 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr10 + x2, xmask).to(tl.int1) tmp34 = tl.load(in_out_ptr3 + x2, xmask) tmp38 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp15 = tmp14 * tmp3 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tl.where(tmp0, tmp16, tmp6) tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 / tmp21 tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp0, tmp26, tmp6) tmp29 = tmp27 - tmp28 tmp30 = tl_math.exp(tmp29) tmp32 = tmp30 / tmp31 tmp35 = tmp34 * tmp3 tmp36 = tl.where(tmp33, tmp34, tmp35) tmp37 = tl.where(tmp0, tmp36, tmp6) tmp39 = tmp37 - tmp38 tmp40 = tl_math.exp(tmp39) tmp42 = tmp40 / tmp41 tl.store(in_out_ptr0 + x2, 
tmp12, xmask) tl.store(in_out_ptr1 + x2, tmp22, xmask) tl.store(in_out_ptr2 + x2, tmp32, xmask) tl.store(in_out_ptr3 + x2, tmp42, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tl.full([1], 16, tl.int64) tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + x2, tmp52, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = 
-8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp39, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_out_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(in_out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused__log_softmax_elu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp9 = tmp8 > tmp1 tmp10 = tmp8 * tmp3 tmp11 = libdevice.expm1(tmp10) tmp12 = tmp11 * tmp3 tmp13 = tl.where(tmp9, tmp10, tmp12) tmp15 = tmp14 > tmp1 tmp16 = tmp14 * tmp3 tmp17 = libdevice.expm1(tmp16) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp15, tmp16, tmp18) tmp20 = triton_helpers.maximum(tmp13, tmp19) tmp22 = tmp21 > tmp1 tmp23 = tmp21 * tmp3 tmp24 = libdevice.expm1(tmp23) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp22, tmp23, tmp25) tmp27 = triton_helpers.maximum(tmp20, tmp26) tmp29 = tmp28 > tmp1 tmp30 = tmp28 * tmp3 tmp31 = libdevice.expm1(tmp30) tmp32 = tmp31 * tmp3 tmp33 = tl.where(tmp29, tmp30, tmp32) tmp34 = triton_helpers.maximum(tmp27, tmp33) tmp35 = tmp7 - tmp34 tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__log_softmax_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) assert_size_stride(primals_11, (16, 4), (4, 1)) assert_size_stride(primals_12, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](primals_4, buf4, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_4 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_5, out=buf9) del primals_5 buf10 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf9, buf10, 128, XBLOCK=128, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf10, primals_6, out=buf11) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf11, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_7, out=buf17) del primals_7 buf18 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf17, buf18, 128, XBLOCK=128, num_warps=4, num_stages=1) buf19 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf18, primals_8, out=buf19) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf19, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_9, out=buf25) del primals_9 buf26 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf25, buf26, 128, XBLOCK=128, num_warps=4, 
num_stages=1) buf27 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf26, primals_10, out=buf27) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf27, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf22 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf30 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_leaky_relu_mul_where_2[grid(4)](buf4, buf3, buf2, buf12, buf11, buf20, buf19, buf28, buf27, buf5, buf6, buf13, buf14, buf21, buf22, buf29, buf30, 4, XBLOCK=4, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 buf15 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0) del buf11 buf23 = reinterpret_tensor(buf19, (4, 4), (4, 1), 0) del buf19 buf31 = reinterpret_tensor(buf27, (4, 4), (4, 1), 0) del buf27 triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(16)](buf7, buf15, buf23, buf31, buf4, buf3, buf5, buf6, buf12, buf13, buf14, buf20, buf21, buf22, buf28, buf29, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf13 del buf14 del buf21 del buf22 del buf29 del buf30 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf7, buf0, out=buf8) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf15, buf9, out=buf16) buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf23, buf17, out=buf24) buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf31, buf25, out=buf32) buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_cat_4[grid(64)](buf8, buf16, buf24, buf32, buf33, 64, XBLOCK=64, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf33, primals_11, out=buf34) buf35 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf34, buf35, 128, XBLOCK=128, num_warps=4, num_stages=1) buf36 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf35, primals_12, out=buf36) buf37 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf36, buf37, 16, XBLOCK=16, num_warps=1, num_stages=1) buf38 = buf6 del buf6 buf39 = buf5 del buf5 triton_poi_fused__softmax_leaky_relu_mul_where_5[grid(4)](buf4, buf37, buf36, buf38, buf39, 4, XBLOCK=4, num_warps=1, num_stages=1) buf40 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0) del buf36 triton_poi_fused__softmax_leaky_relu_mul_where_6[grid(16)](buf40, buf4, buf37, buf38, buf39, 16, XBLOCK=16, num_warps=1, num_stages=1 ) del buf38 del buf39 buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf40, buf34, out=buf41) buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_elu_7[grid(16)](buf41, buf42, 16, XBLOCK=16, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_8[grid(16)](buf42, buf43, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf42 return (buf43, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20, buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41, buf43, reinterpret_tensor(buf34, (4, 4), (1, 4), 0), 
reinterpret_tensor( buf35, (8, 16), (1, 8), 0), reinterpret_tensor(primals_12, (1, 8), (1, 1), 0), reinterpret_tensor(buf33, (16, 4), (1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor( buf26, (8, 16), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor( buf18, (8, 16), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), ( 1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(buf10, (8, 16), (1, 8), 0), reinterpret_tensor( primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(buf1, (8, 16), (1, 8), 0), reinterpret_tensor(primals_3, (1, 8), (1, 1), 0)) class BaseModel(nn.Module): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" pass @classmethod def build_model_from_args(cls, args): """Build a new model instance.""" raise NotImplementedError( 'Models must implement the build_model_from_args method') class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) N = h.size()[0] a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1) ], dim=1).view(N, -1, 2 * self.out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class PetarVGATNew(BaseModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--num-features', type=int) parser.add_argument('--num-classes', type=int) parser.add_argument('--hidden-size', type=int, default=8) parser.add_argument('--dropout', type=float, default=0.6) parser.add_argument('--alpha', type=float, default=0.2) parser.add_argument('--nheads', type=int, default=8) @classmethod def build_model_from_args(cls, args): return cls(args.num_features, args.hidden_size, args.num_classes, args.dropout, args.alpha, args.nheads) def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads): """Dense version of GAT.""" super(PetarVGATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout= dropout, alpha=alpha, concat=False) def forward(self, input_0, input_1): primals_1 = self.attention_0.W 
primals_3 = self.attention_0.a primals_2 = self.attention_1.W primals_6 = self.attention_1.a primals_4 = self.attention_2.W primals_8 = self.attention_2.a primals_5 = self.attention_3.W primals_10 = self.attention_3.a primals_11 = self.out_att.W primals_12 = self.out_att.a primals_7 = input_0 primals_9 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
zxhhh97/cogdl
PetarVGAT
false
4,718
[ "MIT" ]
0
de21c78d9bbbf0c6cafbc72ff241cda35693ec37
https://github.com/zxhhh97/cogdl/tree/de21c78d9bbbf0c6cafbc72ff241cda35693ec37
ConvTranspose2d
import torch
import torch.nn as nn


class ConvTranspose2d(nn.Module):

    def __init__(self):
        super(ConvTranspose2d, self).__init__()
        self.convtranspose2d = nn.ConvTranspose2d(16, 33, 3, stride=2)

    def forward(self, x):
        x = self.convtranspose2d(x)
        return x


def get_inputs():
    return [torch.rand([4, 16, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 10692 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 33 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 33, 3, 3), (297, 9, 3, 1)) assert_size_stride(primals_2, (33,), (1,)) assert_size_stride(primals_3, (4, 16, 4, 4), (256, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 33, 9, 9), (2673, 81, 9, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(10692)](buf1, primals_2, 10692, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class ConvTranspose2dNew(nn.Module): def __init__(self): super(ConvTranspose2dNew, self).__init__() self.convtranspose2d = nn.ConvTranspose2d(16, 33, 3, stride=2) def forward(self, input_0): primals_1 = self.convtranspose2d.weight primals_2 = self.convtranspose2d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
yifanpu001/PytorchToCaffe
ConvTranspose2d
false
4,719
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
_Transition
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn


class _Transition(nn.Module):

    def __init__(self, in_channels, args):
        super(_Transition, self).__init__()
        self.pool = nn.Conv2d(in_channels, in_channels, kernel_size=2,
            stride=2, groups=in_channels)

    def forward(self, x):
        x = self.pool(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'args': _mock_config()}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class _TransitionNew(nn.Module): def __init__(self, in_channels, args): super(_TransitionNew, self).__init__() self.pool = nn.Conv2d(in_channels, in_channels, kernel_size=2, stride=2, groups=in_channels) def forward(self, input_0): primals_1 = self.pool.weight primals_2 = self.pool.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
yifanpu001/PytorchToCaffe
_Transition
false
4,720
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
Mul
import torch
import torch.nn as nn


class Mul(nn.Module):

    def __init__(self):
        super(Mul, self).__init__()

    def forward(self, x):
        x = torch.mul(x, 20)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class MulNew(nn.Module):

    def __init__(self):
        super(MulNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
yifanpu001/PytorchToCaffe
Mul
false
4,721
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
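A minimal sketch comparing the two cells of the row above, assuming both Mul and MulNew from this row are in scope and a CUDA device is available:

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')   # same shape as the row's get_inputs()
eager_out = Mul()(x)                          # torch.mul(x, 20)
triton_out = MulNew()(x)                      # triton_poi_fused_mul_0 via call()
print(torch.allclose(eager_out, triton_out))  # expected: True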
relu
import torch
import torch.nn as nn


class relu(nn.Module):

    def __init__(self, layer=10, channels=32):
        super(relu, self).__init__()
        layers = []
        for i in range(layer):
            layers.append(nn.ReLU(inplace=True))
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = triton_helpers.maximum(tmp1, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = triton_helpers.maximum(tmp1, tmp7) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = triton_helpers.maximum(tmp1, tmp10) tl.store(out_ptr1 + x0, tmp11, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) return arg0_1, class reluNew(nn.Module): def __init__(self, layer=10, channels=32): super(reluNew, self).__init__() layers = [] for i in range(layer): layers.append(nn.ReLU(inplace=True)) self.layers = nn.Sequential(*layers) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
relu
false
4,722
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
Sub
import torch
import torch.nn as nn


class Sub(nn.Module):

    def __init__(self):
        super(Sub, self).__init__()

    def forward(self, x):
        x = torch.sub(x, 20)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SubNew(nn.Module): def __init__(self): super(SubNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
Sub
false
4,723
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
maxpool
import torch
import torch.nn as nn


class maxpool(nn.Module):

    def __init__(self, layer=10, channels=32):
        super(maxpool, self).__init__()
        layers = []
        for i in range(layer):
            layers.append(nn.MaxPool2d(3, 1, 1))
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tl.store(out_ptr0 + x4, tmp51, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 
triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused_max_pool2d_with_indices_0[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 return buf9, class maxpoolNew(nn.Module): def __init__(self, layer=10, channels=32): super(maxpoolNew, self).__init__() layers = [] for i in range(layer): layers.append(nn.MaxPool2d(3, 1, 1)) self.layers = nn.Sequential(*layers) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
maxpool
false
4,724
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
PositionWiseFeedForward
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn


def gelu(x):
    """Implementation of the gelu activation function by Hugging Face"""
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class PositionWiseFeedForward(nn.Module):
    """ FeedForward Neural Networks for each position """

    def __init__(self, cfg):
        super().__init__()
        self.fc1 = nn.Linear(cfg.dim, cfg.dim_ff)
        self.fc2 = nn.Linear(cfg.dim_ff, cfg.dim)

    def forward(self, x):
        return self.fc2(gelu(self.fc1(x)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'cfg': _mock_config(dim=4, dim_ff=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 def gelu(x): """Implementation of the gelu activation function by Hugging Face""" return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class PositionWiseFeedForwardNew(nn.Module): """ FeedForward Neural Networks for each position """ def __init__(self, cfg): super().__init__() self.fc1 = nn.Linear(cfg.dim, cfg.dim_ff) self.fc2 = nn.Linear(cfg.dim_ff, cfg.dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
akakakakakaa/pytorchic-bert
PositionWiseFeedForward
false
4,725
[ "Apache-2.0" ]
0
055d72adce9a41c322d23145840f31a94d9ffec4
https://github.com/akakakakakaa/pytorchic-bert/tree/055d72adce9a41c322d23145840f31a94d9ffec4
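A short check, offered as a sketch, of the GELU identity the fused kernel above relies on: the constant 0.7071067811865475 hard-coded in triton_poi_fused_add_div_erf_mul_0 is 1/sqrt(2), so multiplying by it matches the division by math.sqrt(2.0) in the row's gelu():

import math
import torch

x = torch.randn(16)
ref = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))        # gelu() as written in the row
fused = x * 0.5 * (1.0 + torch.erf(x * 0.7071067811865475))  # form used by the Triton kernel
print(torch.allclose(ref, fused))                            # expected: True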
Conv2d
import torch
import torch.nn as nn


class Conv2d(nn.Module):

    def __init__(self):
        super(Conv2d, self).__init__()
        self.conv2d = nn.Conv2d(16, 33, kernel_size=1, padding=1, stride=2)

    def forward(self, x):
        x = self.conv2d(x)
        return x


def get_inputs():
    return [torch.rand([4, 16, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 143748 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 1089 % 33 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (33, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_2, (33,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 33, 33, 33), (35937, 1089, 33, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(143748)](buf1, primals_2, 143748, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class Conv2dNew(nn.Module): def __init__(self): super(Conv2dNew, self).__init__() self.conv2d = nn.Conv2d(16, 33, kernel_size=1, padding=1, stride=2) def forward(self, input_0): primals_1 = self.conv2d.weight primals_2 = self.conv2d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
yifanpu001/PytorchToCaffe
Conv2d
false
4,726
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
softmax
import torch
import torch.nn as nn


class softmax(nn.Module):

    def __init__(self, layer=10, channels=32):
        super(softmax, self).__init__()
        layers = []
        for i in range(layer):
            layers.append(nn.Softmax(dim=1))
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 >= tmp8 tmp10 = 1.0 tmp11 = -1.0 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tmp0 * tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = 0.0 tmp17 = tmp15 >= tmp16 tmp18 = 1.0 tmp19 = -1.0 tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp20 * tmp15 tmp22 = tmp8 / tmp21 tl.store(out_ptr0 + x3, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: 
tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp5 = tl_math.exp(tmp4) tmp6 = tmp3 + tmp5 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 + tmp8 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = 0.0 tmp14 = tmp12 >= tmp13 tmp15 = 1.0 tmp16 = -1.0 tmp17 = tl.where(tmp14, tmp15, tmp16) tmp18 = tmp1 * tmp17 tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp10 = tl_math.exp(tmp9) tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = 0.0 tmp21 = tmp19 >= tmp20 tmp22 = 1.0 tmp23 = -1.0 tmp24 = tl.where(tmp21, tmp22, tmp23) tmp25 = tmp24 * tmp19 tmp26 = tmp8 / tmp25 tl.store(out_ptr0 + x3, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp5 = tl_math.exp(tmp4) tmp6 = tmp3 + tmp5 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 + tmp8 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = tmp1 / tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, 
num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf1, buf0, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_3[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = buf0 del buf0 triton_poi_fused__softmax_4[grid(256)](buf3, buf2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = buf2 del buf2 triton_poi_fused__softmax_4[grid(256)](buf5, buf4, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = buf4 del buf4 triton_poi_fused__softmax_4[grid(256)](buf7, buf6, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_3[grid(256)](buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = buf6 del buf6 triton_poi_fused__softmax_4[grid(256)](buf9, buf8, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) buf11 = buf9 del buf9 triton_poi_fused__softmax_3[grid(256)](buf10, buf11, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf12 = buf8 del buf8 triton_poi_fused__softmax_4[grid(256)](buf11, buf10, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__softmax_3[grid(256)](buf12, buf13, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf14 = buf10 del buf10 triton_poi_fused__softmax_4[grid(256)](buf13, buf12, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) buf15 = buf13 del buf13 triton_poi_fused__softmax_3[grid(256)](buf14, buf15, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf16 = buf12 del buf12 triton_poi_fused__softmax_4[grid(256)](buf15, buf14, buf16, 256, XBLOCK=256, num_warps=4, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_3[grid(256)](buf16, buf17, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf18 = buf14 del buf14 triton_poi_fused__softmax_4[grid(256)](buf17, buf16, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf16 buf19 = buf17 del buf17 triton_poi_fused__softmax_5[grid(256)](buf18, buf19, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf18 return buf19, class softmaxNew(nn.Module): def __init__(self, layer=10, channels=32): super(softmaxNew, self).__init__() layers = [] for i in range(layer): layers.append(nn.Softmax(dim=1)) self.layers = nn.Sequential(*layers) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
yifanpu001/PytorchToCaffe
softmax
false
4,727
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
Attention
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn from torch.nn import Dropout from torch.nn import Softmax from torch.nn import Linear class Attention(nn.Module): def __init__(self, config): super(Attention, self).__init__() self.num_attention_heads = config['num_heads'] self.attention_head_size = int(config['hidden_size'] / self. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = Linear(config['hidden_size'], self.all_head_size) self.key = Linear(config['hidden_size'], self.all_head_size) self.value = Linear(config['hidden_size'], self.all_head_size) self.out = Linear(self.all_head_size, config['hidden_size']) self.attn_dropout = Dropout(config['attention_dropout_rate']) self.proj_dropout = Dropout(config['attention_dropout_rate']) self.softmax = Softmax(dim=-1) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_probs = self.softmax(attention_scores) attention_probs = self.attn_dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) attention_output = self.out(context_layer) attention_output = self.proj_dropout(attention_output) return attention_output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(num_heads=4, hidden_size=4, attention_dropout_rate=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn import Dropout from torch.nn import Softmax from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = 
tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, 
primals_7, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_9 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8 class AttentionNew(nn.Module): def __init__(self, config): super(AttentionNew, self).__init__() self.num_attention_heads = config['num_heads'] self.attention_head_size = int(config['hidden_size'] / self. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = Linear(config['hidden_size'], self.all_head_size) self.key = Linear(config['hidden_size'], self.all_head_size) self.value = Linear(config['hidden_size'], self.all_head_size) self.out = Linear(self.all_head_size, config['hidden_size']) self.attn_dropout = Dropout(config['attention_dropout_rate']) self.proj_dropout = Dropout(config['attention_dropout_rate']) self.softmax = Softmax(dim=-1) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_8 = self.out.weight primals_9 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
LJOVO/TranSalNet
Attention
false
4,728
[ "MIT" ]
0
a2aba83e3b8f54c47b712511bf4f515f236326ed
https://github.com/LJOVO/TranSalNet/tree/a2aba83e3b8f54c47b712511bf4f515f236326ed
LengthPredictor
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn class LengthPredictionLoss(nn.Module): def __init__(self, max_delta=50): super().__init__() self.max_delta = max_delta def forward(self, logits, src_mask, tgt_mask): src_lens, tgt_lens = src_mask.sum(1), tgt_mask.sum(1) delta = (tgt_lens - src_lens + self.max_delta).clamp(0, self. max_delta * 2 - 1).long() loss = F.cross_entropy(logits, delta, reduction='mean') return {'length_prediction_loss': loss} class LengthPredictor(nn.Module): def __init__(self, hidden_size, max_delta=50): super().__init__() self.hidden_size = hidden_size self.max_delta = max_delta self._init_modules() self._init_loss() def forward(self, src, src_mask, tgt_len=None): src_mean = self._compute_mean_emb(src, src_mask) logits, delta = self._predict_delta(src_mean) return logits, delta def _predict_delta(self, src): logits = self.length_predictor(src) delta = logits.argmax(-1) - float(self.max_delta) return logits, delta def _compute_mean_emb(self, src, src_mask): mean_emb = (src * src_mask[:, :, None]).sum(1) / src_mask.sum(1)[:, None] return mean_emb def _init_modules(self): self.length_predictor = nn.Linear(self.hidden_size, self.max_delta * 2) def _init_loss(self): self.loss = LengthPredictionLoss(self.max_delta) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x0 = xindex % 16 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = tmp1 + tmp4 tmp16 = tmp15 + tmp8 tmp17 = tmp16 + tmp12 tmp18 = tmp14 / tmp17 tl.store(out_ptr0 + x4, tmp18, xmask) @triton.jit def triton_per_fused_argmax_sub_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 rnumel = 100 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 100 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = tl.broadcast_to(rindex, tmp3.shape) _, tmp2_tmp = triton_helpers.max_with_index(tmp3, tmp4, 1) tmp2 = tmp2_tmp[:, None] tmp5 = tmp2.to(tl.float32) tmp6 = 50.0 tmp7 = tmp5 - tmp6 tl.store(out_ptr1 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (100, 4), (4, 1)) assert_size_stride(primals_4, (100,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_sum_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_3, (4, 100), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_per_fused_argmax_sub_1[grid(64)](buf1, buf3, 64, 100, XBLOCK =8, num_warps=8, num_stages=1) return reinterpret_tensor(buf1, (4, 
4, 4, 100), (1600, 400, 100, 1), 0 ), buf3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class LengthPredictionLoss(nn.Module): def __init__(self, max_delta=50): super().__init__() self.max_delta = max_delta def forward(self, logits, src_mask, tgt_mask): src_lens, tgt_lens = src_mask.sum(1), tgt_mask.sum(1) delta = (tgt_lens - src_lens + self.max_delta).clamp(0, self. max_delta * 2 - 1).long() loss = F.cross_entropy(logits, delta, reduction='mean') return {'length_prediction_loss': loss} class LengthPredictorNew(nn.Module): def __init__(self, hidden_size, max_delta=50): super().__init__() self.hidden_size = hidden_size self.max_delta = max_delta self._init_modules() self._init_loss() def _predict_delta(self, src): logits = self.length_predictor(src) delta = logits.argmax(-1) - float(self.max_delta) return logits, delta def _compute_mean_emb(self, src, src_mask): mean_emb = (src * src_mask[:, :, None]).sum(1) / src_mask.sum(1)[:, None] return mean_emb def _init_modules(self): self.length_predictor = nn.Linear(self.hidden_size, self.max_delta * 2) def _init_loss(self): self.loss = LengthPredictionLoss(self.max_delta) def forward(self, input_0, input_1): primals_3 = self.length_predictor.weight primals_4 = self.length_predictor.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
ygnn123/training_extensions
LengthPredictor
false
4,729
[ "Apache-2.0" ]
0
c3aeba9359b0d4e0ef9c054de777d3ec081a9892
https://github.com/ygnn123/training_extensions/tree/c3aeba9359b0d4e0ef9c054de777d3ec081a9892
toy_yolov3
import torch import torch.nn as nn import torch.nn.functional as F class toy_yolov3(nn.Module): def __init__(self): super(toy_yolov3, self).__init__() self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=2, padding=1) self.conv2_1 = nn.Conv2d(128, 128, kernel_size=1, stride=1, padding=0) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1) self.conv4 = nn.Conv2d(256, 255, kernel_size=3, stride=1, padding=1) self.conv5 = nn.Conv2d(255, 255, kernel_size=3, stride=2, padding=1) self.conv6 = nn.Conv2d(255, 255, kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv1(x) identity = x out = self.conv2_1(x) out = self.conv2_2(out) out += identity out = self.conv3(out) out = F.interpolate(out, scale_factor=2) out = torch.cat((out, identity), dim=1) out1 = self.conv4(out) out2 = self.conv5(out1) out3 = self.conv6(out2) return out3 def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 65280 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 65025 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 255 y1 = yindex // 255 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 255 * x2 + 2295 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_add_convolution_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x2 = xindex // 8192 % 32 x1 = xindex // 256 % 32 x3 = xindex // 262144 x5 = xindex // 256 x6 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x2, tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.full([XBLOCK], 16, tl.int32) tmp7 = tmp5 + tmp6 tmp8 = tmp5 < 0 tmp9 = tl.where(tmp8, tmp7, tmp5) tmp10 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0 ) tmp11 = tmp10 + tmp6 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr1 + (128 * tmp13 + 2048 * tmp9 + 32768 * x3 + x0), tmp4, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr2 + x0, tmp4, eviction_policy='evict_last', other=0.0 ) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp22 = tl.load(in_ptr3 + (128 * x5 + (-128 + x0)), tmp19, eviction_policy='evict_last', other=0.0) tmp23 = tl.where(tmp4, tmp18, tmp22) tl.store(out_ptr0 + x6, tmp23, None) @triton.jit def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 255 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 261120 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 255 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_11(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 1020 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 255 y1 = yindex // 255 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 255 * x2 + 16320 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (128, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (255, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (255,), (1,)) assert_size_stride(primals_12, (255, 255, 3, 3), (2295, 9, 3, 1)) assert_size_stride(primals_13, (255,), (1,)) assert_size_stride(primals_14, (255, 255, 3, 3), (2295, 9, 3, 1)) assert_size_stride(primals_15, (255,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((128, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(384, 9)](primals_1, buf0, 384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_6, buf2, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_8, buf3, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf4 = empty_strided_cuda((255, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_3[grid(65280, 9)](primals_10, buf4, 65280, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf5 = empty_strided_cuda((255, 255, 3, 3), (2295, 1, 765, 255), torch.float32) triton_poi_fused_4[grid(65025, 9)](primals_12, buf5, 65025, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf6 = empty_strided_cuda((255, 255, 3, 3), (2295, 1, 765, 255), torch.float32) triton_poi_fused_4[grid(65025, 9)](primals_14, buf6, 65025, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf7 = 
extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf8 = buf7 del buf7 triton_poi_fused_convolution_5[grid(524288)](buf8, primals_2, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf9 = extern_kernels.convolution(buf8, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf10 = buf9 del buf9 triton_poi_fused_convolution_5[grid(524288)](buf10, primals_5, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf11 = extern_kernels.convolution(buf10, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf12 = buf11 del buf11 triton_poi_fused_add_convolution_6[grid(524288)](buf12, primals_7, buf8, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf14 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf14, 32, XBLOCK=32, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256), torch.float32) triton_poi_fused_cat_8[grid(1048576)](buf14, buf13, primals_9, buf8, buf15, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf13 del primals_9 buf16 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 255, 32, 32), (261120, 1, 8160, 255)) buf17 = buf16 del buf16 triton_poi_fused_convolution_9[grid(1044480)](buf17, primals_11, 1044480, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf18 = extern_kernels.convolution(buf17, buf5, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 255, 16, 16), (65280, 1, 4080, 255)) buf19 = buf18 del buf18 triton_poi_fused_convolution_10[grid(261120)](buf19, primals_13, 261120, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf20 = extern_kernels.convolution(buf19, buf6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 255, 8, 8), (16320, 1, 2040, 255)) buf21 = empty_strided_cuda((4, 255, 8, 8), (16320, 64, 8, 1), torch .float32) triton_poi_fused_convolution_11[grid(1020, 64)](buf20, primals_15, buf21, 1020, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf20 del primals_15 return (buf21, buf0, buf1, primals_4, buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf12, buf14, buf15, buf17, buf19) class toy_yolov3New(nn.Module): def __init__(self): super(toy_yolov3New, self).__init__() self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=2, padding=1) self.conv2_1 = nn.Conv2d(128, 128, kernel_size=1, stride=1, padding=0) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1) self.conv4 = nn.Conv2d(256, 255, kernel_size=3, stride=1, padding=1) self.conv5 = nn.Conv2d(255, 255, kernel_size=3, stride=2, 
padding=1) self.conv6 = nn.Conv2d(255, 255, kernel_size=3, stride=2, padding=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2_1.weight primals_5 = self.conv2_1.bias primals_6 = self.conv2_2.weight primals_7 = self.conv2_2.bias primals_8 = self.conv3.weight primals_9 = self.conv3.bias primals_10 = self.conv4.weight primals_11 = self.conv4.bias primals_12 = self.conv5.weight primals_13 = self.conv5.bias primals_14 = self.conv6.weight primals_15 = self.conv6.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
yifanpu001/PytorchToCaffe
toy_yolov3
false
4,730
[ "MIT" ]
0
37c1ebfc3547e93b1c174721036d03c831c60e48
https://github.com/yifanpu001/PytorchToCaffe/tree/37c1ebfc3547e93b1c174721036d03c831c60e48
RobertaClassificationHead
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, 2) def forward(self, features, **kwargs): x = features[:, 0, :] x = x.reshape(-1, x.size(-1) * 2) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 8), (8, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((8, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (8, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 4), (1, 8), 0), out=buf1) del primals_2 buf2 = buf1 del buf1 triton_poi_fused_tanh_1[grid(32)](buf2, primals_3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((8, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return buf3, reinterpret_tensor(buf0, (8, 8), (8, 1), 0), buf2, primals_4 class RobertaClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, 2) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Masum06/CodeXGLUE
RobertaClassificationHead
false
4,731
[ "CC0-1.0", "MIT" ]
0
bf1ab8c8878f978bd4ef3cb5e030e52f03e92854
https://github.com/Masum06/CodeXGLUE/tree/bf1ab8c8878f978bd4ef3cb5e030e52f03e92854
RobustLogisticRegression
import torch import numpy as np from torch import nn from torch.utils.data import DataLoader from torchvision import transforms from sklearn.preprocessing import StandardScaler from sklearn import metrics from torch.utils.data import Dataset def compute_auc(labels, scores, pos_label=1): fpr, tpr, _thresholds = metrics.roc_curve(labels, scores, pos_label= pos_label) return metrics.auc(fpr, tpr) class Subset(Dataset): def __init__(self, data, labels, normalize=False): self.ims = data self.labels = labels self.normalize = normalize if normalize: self.T = transforms.Normalize(0.5, 0.5) else: self.T = lambda x: x def __getitem__(self, idx): ret = {'ims': self.T(self.ims[idx]), 'labels': self.labels[idx]} return ret def __len__(self): return self.labels.shape[0] class RobustLogisticRegression(nn.Module): def __init__(self, alpha=[1.0, 1.0, 1.0, 1.0], beta=1.0, alpha0=[1.0, 0.0, 0.0, 0.0]): super(RobustLogisticRegression, self).__init__() self.alpha = nn.Parameter(torch.Tensor(alpha)) if not len(alpha0) == 4: raise ValueError('alpha0 must be a vector og length 4') self.alpha0 = torch.Tensor(alpha0) self.beta = beta self.scaler = StandardScaler() def get_alpha(self): return self.alpha def forward(self, x): t = torch.Tensor(x) t = t * self.get_alpha() t = t.sum(1) return t def predict_prob(self, x): return 1 / (1 + torch.exp(-self(x))) def predict(self, x): return torch.round(self.predict_prob(x)) def prior_loss(self): return self.beta * torch.sqrt(torch.square(self.alpha0 - self.alpha ).sum()) def binary_acc(self, x, y): y_true = np.array(y) y_pred = self.predict(x).detach().numpy() nhits = (y_pred == y_true).sum() return nhits / y_true.shape[0] def auc(self, x, y): scores = self.predict_prob(x) return compute_auc(y, scores.detach().numpy()) def scaler_fit(self, x): self.scaler.fit(x) def scaler_transform(self, x): return self.scaler.transform(x) def save_weights(self, f): np.save(f, self.alpha_params.detach().numpy()) def fit(self, x, y, tst_x=None, tst_y=None, nepochs=2, batch_size=256, lr=0.01, opt_type='lbfgs', workers=1, balanced=True, verb=True, scale=False, early_stopping=False, patience=10): if scale: self.scaler_fit(x) x = self.scaler_transform(x) tst_x = tst_x if tst_x is None else self.scaler_transform(tst_x) if balanced: n1 = int(sum(y)) n0 = len(y) - n1 if n0 < n1: p = int(np.floor(n1 / n0)) X = np.concatenate((x[y == 0].repeat(p, 0), x[y == 1]), 0) Y = np.concatenate((y[y == 0].repeat(p, 0), y[y == 1]), 0) else: p = int(np.floor(n0 / n1)) X = np.concatenate((x[y == 1].repeat(p, 0), x[y == 0]), 0) Y = np.concatenate((y[y == 1].repeat(p, 0), y[y == 0]), 0) else: X = x Y = y loader = DataLoader(Subset(torch.tensor(X).float(), torch.Tensor(Y) ), batch_size=batch_size, shuffle=True, num_workers=workers) criterion = nn.BCEWithLogitsLoss() if opt_type == 'lbfgs': opt = torch.optim.LBFGS(self.parameters(), lr=lr) elif opt_type == 'adam': opt = torch.optim.Adam(self.parameters(), lr=lr) if nepochs == 2: nepochs = 200 else: raise ValueError('opt_type must be lbfgs or adam') best_auc = self.auc(x, y) pat = 0 if verb: None for epoch in range(nepochs): if verb: criterion(self(torch.Tensor(x)), torch.Tensor(y)).detach( ).numpy().round(3) self.prior_loss().detach().numpy().round(3) self.auc(x, y).round(3) self.binary_acc(x, y).round(3) np.NAN if tst_x is None else self.auc(tst_x, tst_y).round(3) _alpha = self.get_alpha().detach().numpy() None if early_stopping: cur_auc = self.auc(x, y) if cur_auc < best_auc: if pat < patience: pat += 1 else: if verb: None return else: best_auc = cur_auc pat = 0 for 
batch in loader: _x, _y = batch['ims'], batch['labels'] def loss_closure(): opt.zero_grad() pred = self(_x) loss = criterion(pred, _y) + self.prior_loss() loss.backward() return loss if opt_type == 'lbfgs': opt.step(loss_closure) elif opt_type == 'adam': loss_closure() opt.step() else: raise ValueError('opt_type must be lbfgs or adam') if (abs(self.alpha.detach().numpy()) > 1000000.0).any(): raise RuntimeError('convergence failed') def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch import nn from torch.utils.data import DataLoader from torchvision import transforms from sklearn.preprocessing import StandardScaler from sklearn import metrics from torch.utils.data import Dataset assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask) tmp6 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask) tmp9 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tl.store(out_ptr0 + x4, tmp11, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](primals_1, primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf0, primals_1 def compute_auc(labels, scores, pos_label=1): fpr, tpr, _thresholds = metrics.roc_curve(labels, scores, pos_label= pos_label) return metrics.auc(fpr, tpr) class Subset(Dataset): def __init__(self, data, labels, normalize=False): self.ims = data self.labels = labels self.normalize = normalize if normalize: self.T = transforms.Normalize(0.5, 0.5) else: self.T = lambda x: x def __getitem__(self, idx): ret = {'ims': self.T(self.ims[idx]), 'labels': self.labels[idx]} return ret def __len__(self): return self.labels.shape[0] class RobustLogisticRegressionNew(nn.Module): def __init__(self, alpha=[1.0, 1.0, 1.0, 1.0], beta=1.0, alpha0=[1.0, 0.0, 0.0, 0.0]): super(RobustLogisticRegressionNew, self).__init__() self.alpha = nn.Parameter(torch.Tensor(alpha)) if not len(alpha0) == 4: raise ValueError('alpha0 must be a vector og length 4') self.alpha0 = torch.Tensor(alpha0) self.beta = beta self.scaler = StandardScaler() def get_alpha(self): return self.alpha def predict_prob(self, x): return 1 / (1 + torch.exp(-self(x))) def predict(self, x): return torch.round(self.predict_prob(x)) def prior_loss(self): return self.beta * torch.sqrt(torch.square(self.alpha0 - self.alpha ).sum()) def binary_acc(self, x, y): y_true = np.array(y) y_pred = self.predict(x).detach().numpy() nhits = (y_pred == y_true).sum() return nhits / y_true.shape[0] def auc(self, x, y): scores = self.predict_prob(x) return compute_auc(y, scores.detach().numpy()) def scaler_fit(self, x): self.scaler.fit(x) def scaler_transform(self, x): return self.scaler.transform(x) def save_weights(self, f): np.save(f, self.alpha_params.detach().numpy()) def fit(self, x, y, tst_x=None, tst_y=None, nepochs=2, batch_size=256, lr=0.01, opt_type='lbfgs', workers=1, balanced=True, verb=True, scale=False, early_stopping=False, patience=10): if scale: self.scaler_fit(x) x = 
self.scaler_transform(x) tst_x = tst_x if tst_x is None else self.scaler_transform(tst_x) if balanced: n1 = int(sum(y)) n0 = len(y) - n1 if n0 < n1: p = int(np.floor(n1 / n0)) X = np.concatenate((x[y == 0].repeat(p, 0), x[y == 1]), 0) Y = np.concatenate((y[y == 0].repeat(p, 0), y[y == 1]), 0) else: p = int(np.floor(n0 / n1)) X = np.concatenate((x[y == 1].repeat(p, 0), x[y == 0]), 0) Y = np.concatenate((y[y == 1].repeat(p, 0), y[y == 0]), 0) else: X = x Y = y loader = DataLoader(Subset(torch.tensor(X).float(), torch.Tensor(Y) ), batch_size=batch_size, shuffle=True, num_workers=workers) criterion = nn.BCEWithLogitsLoss() if opt_type == 'lbfgs': opt = torch.optim.LBFGS(self.parameters(), lr=lr) elif opt_type == 'adam': opt = torch.optim.Adam(self.parameters(), lr=lr) if nepochs == 2: nepochs = 200 else: raise ValueError('opt_type must be lbfgs or adam') best_auc = self.auc(x, y) pat = 0 if verb: None for epoch in range(nepochs): if verb: criterion(self(torch.Tensor(x)), torch.Tensor(y)).detach( ).numpy().round(3) self.prior_loss().detach().numpy().round(3) self.auc(x, y).round(3) self.binary_acc(x, y).round(3) np.NAN if tst_x is None else self.auc(tst_x, tst_y).round(3) _alpha = self.get_alpha().detach().numpy() None if early_stopping: cur_auc = self.auc(x, y) if cur_auc < best_auc: if pat < patience: pat += 1 else: if verb: None return else: best_auc = cur_auc pat = 0 for batch in loader: _x, _y = batch['ims'], batch['labels'] def loss_closure(): opt.zero_grad() pred = self(_x) loss = criterion(pred, _y) + self.prior_loss() loss.backward() return loss if opt_type == 'lbfgs': opt.step(loss_closure) elif opt_type == 'adam': loss_closure() opt.step() else: raise ValueError('opt_type must be lbfgs or adam') if (abs(self.alpha.detach().numpy()) > 1000000.0).any(): raise RuntimeError('convergence failed') def forward(self, input_0): primals_2 = self.alpha primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
vitskvara/shape-guided-anomaly-detection
RobustLogisticRegression
false
4,732
[ "MIT" ]
0
6685b2e0b97968a6d0f478d2920486da107b277f
https://github.com/vitskvara/shape-guided-anomaly-detection/tree/6685b2e0b97968a6d0f478d2920486da107b277f
RobertaClassificationHead
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.checkpoint class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5, num_labels=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4 class RobertaClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Hzfinfdu/Black-Box-Tuning
RobertaClassificationHead
false
4,733
[ "MIT" ]
0
64eb5505875dc1b242c6f0a2a2f07e4000c24cb4
https://github.com/Hzfinfdu/Black-Box-Tuning/tree/64eb5505875dc1b242c6f0a2a2f07e4000c24cb4
BertAttention
from _paritybench_helpers import _mock_config import math import torch import torch.utils.data import torch.nn as nn import torch.nn import torch as torch import torch.sparse class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.softmax = nn.Softmax(dim=-1) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def transpose_key_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 3, 1) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = self.softmax(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dense.bert_output_layer = True self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.utils.data import torch.nn as nn import torch.nn import torch as torch import torch.sparse assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_div_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_add_div_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def 
triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_div_1[grid(64)](buf5, primals_8, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_add_div_2[grid(256)](buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) 
del buf10 extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_10 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.softmax = nn.Softmax(dim=-1) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def transpose_key_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 3, 1) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = self.softmax(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dense.bert_output_layer = True self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class BertAttentionNew(nn.Module): def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
Sengxian/cogdl
BertAttention
false
4734
[ "MIT" ]
0
b0a855feef6a883bcc0f7df421fc6092ec18abde
https://github.com/Sengxian/cogdl/tree/b0a855feef6a883bcc0f7df421fc6092ec18abde
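A hedged cross-check sketch, not part of the record: one way the eager BertAttention and the compiled BertAttentionNew above could be compared numerically, reusing the record's own get_inputs()/get_init_inputs() helpers. It assumes a CUDA device and that it runs alongside the record's definitions; eval() keeps the 0.5 dropouts inert so the two paths compute the same function.

import torch

_, init_kwargs = get_init_inputs()                 # {'config': _mock_config(...)}
ref = BertAttention(**init_kwargs).cuda().eval()
opt = BertAttentionNew(**init_kwargs).cuda().eval()
opt.load_state_dict(ref.state_dict())              # identical weights in both modules
hidden, mask = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    diff = (ref(hidden, mask) - opt(hidden, mask)).abs().max()
print(diff)  # expected to be floating-point noise only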
InnerProductLayer
import torch import torch.nn as nn from sklearn.metrics import * class InnerProductLayer(nn.Module): """InnerProduct Layer used in PNN that compute the element-wise product or inner product between feature vectors. Input shape - a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``. Output shape - 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape: ``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum. Arguments - **reduce_sum**: bool. Whether return inner product or element-wise product References - [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]// Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.] (https://arxiv.org/pdf/1611.00144.pdf)""" def __init__(self, reduce_sum=True, device='cpu'): super(InnerProductLayer, self).__init__() self.reduce_sum = reduce_sum self def forward(self, inputs): embed_list = inputs row = [] col = [] num_inputs = len(embed_list) for i in range(num_inputs - 1): for j in range(i + 1, num_inputs): row.append(i) col.append(j) p = torch.cat([embed_list[idx] for idx in row], dim=1) q = torch.cat([embed_list[idx] for idx in col], dim=1) inner_product = p * q if self.reduce_sum: inner_product = torch.sum(inner_product, dim=2, keepdim=True) return inner_product def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 24 x0 = xindex % 4 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (64 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (64 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tl.full([1], 24, tl.int64) tmp29 = tl.load(in_ptr0 + (128 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 & xmask, other=0.0) tmp30 = tl.where(tmp24, tmp25, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + x3, tmp34, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 24 x0 = xindex % 4 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 + x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (128 + x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (192 + x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (128 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (192 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tl.full([1], 24, tl.int64) tmp29 = tl.load(in_ptr0 + (192 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 & xmask, other=0.0) tmp30 = tl.where(tmp24, tmp25, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + x3, tmp34, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: 
tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x0, tmp14, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(384)](arg0_1, buf1, 384, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(96)](buf0, buf1, buf2, 96, XBLOCK= 128, num_warps=4, num_stages=1) del buf0 del buf1 return buf2, class InnerProductLayerNew(nn.Module): """InnerProduct Layer used in PNN that compute the element-wise product or inner product between feature vectors. Input shape - a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``. Output shape - 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape: ``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum. Arguments - **reduce_sum**: bool. Whether return inner product or element-wise product References - [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]// Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.] (https://arxiv.org/pdf/1611.00144.pdf)""" def __init__(self, reduce_sum=True, device='cpu'): super(InnerProductLayerNew, self).__init__() self.reduce_sum = reduce_sum self def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
zzz123xyz/DeepCTR-Torch
InnerProductLayer
false
4735
[ "Apache-2.0" ]
0
d6b880cc6b3761dbef90920a28182ef6737dd665
https://github.com/zzz123xyz/DeepCTR-Torch/tree/d6b880cc6b3761dbef90920a28182ef6737dd665
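Illustrative shape check, not part of the record: per the docstring above, N embeddings of shape (batch_size, 1, embedding_size) reduce to N*(N-1)/2 pairwise inner products. A small sketch for N = 3, assuming it runs next to the record's eager class.

import torch

layer = InnerProductLayer(reduce_sum=True)
embeds = [torch.rand(8, 1, 4) for _ in range(3)]  # N = 3 fields, batch 8, embedding 4
out = layer(embeds)
print(out.shape)  # torch.Size([8, 3, 1]) -> N*(N-1)/2 = 3 pairs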
BertLayer
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transform(self, x, linear_layer): bs, seq_len = x.shape[:2] proj = linear_layer(x) proj = proj.view(bs, seq_len, self.num_attention_heads, self. attention_head_size) proj = proj.transpose(1, 2) return proj def attention(self, key, query, value, attention_mask): S = torch.matmul(query, key.transpose(-1, -2) ) / self.attention_head_size ** (1 / 2) if attention_mask is not None: S += attention_mask S_p = F.softmax(S, dim=-1) S_p = self.dropout(S_p) context_layer = torch.matmul(S_p, value) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(new_context_layer_shape) return context_layer def forward(self, hidden_states, attention_mask): """ hidden_states: [bs, seq_len, hidden_state] attention_mask: [bs, 1, 1, seq_len] output: [bs, seq_len, hidden_state] """ key_layer = self.transform(hidden_states, self.key) value_layer = self.transform(hidden_states, self.value) query_layer = self.transform(hidden_states, self.query) attn_value = self.attention(key_layer, query_layer, value_layer, attention_mask) return attn_value class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.self_attention = BertSelfAttention(config) self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size ) self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps= config.layer_norm_eps) self.attention_dropout = nn.Dropout(config.hidden_dropout_prob) self.interm_dense = nn.Linear(config.hidden_size, config. intermediate_size) self.interm_af = F.gelu self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size ) self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.out_dropout = nn.Dropout(config.hidden_dropout_prob) def add_norm(self, input, output, dense_layer, dropout, ln_layer): """ input: the input output: the input that requires the sublayer to transform dense_layer, dropput: the sublayer ln_layer: layer norm that takes input+sublayer(output) #SAM NOTE Really? """ output = dropout(dense_layer(output)) output = ln_layer(input + output) return output def forward(self, hidden_states, attention_mask): """ hidden_states: either from the embedding layer (first bert layer) or from the previous bert layer as shown in the left of Figure 1 of https://arxiv.org/pdf/1706.03762.pdf each block consists of 1. a multi-head attention layer (BertSelfAttention) 2. a add-norm that takes the output of BertSelfAttention and the input of BertSelfAttention 3. a feed forward layer 4. 
a add-norm that takes the output of feed forward layer and the input of feed forward layer """ attention = self.self_attention(hidden_states, attention_mask) x = self.add_norm(hidden_states, attention, self.attention_dense, self.attention_dropout, self.attention_layer_norm) feed_forward = self.interm_dense(x) feed_forward = self.interm_af(feed_forward) out = self.add_norm(x, feed_forward, self.out_dense, self. out_dropout, self.out_layer_norm) return out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(num_attention_heads=4, hidden_size= 4, attention_probs_dropout_prob=0.5, layer_norm_eps=1, hidden_dropout_prob=0.5, intermediate_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float('-inf') tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = tmp29 != 0 tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 != 0 tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38 != 0 tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) tl.store(out_ptr2 + x2, tmp45, xmask) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = 
tl.load(in_out_ptr0 + x4, xmask) tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + 
x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf2, primals_7, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf2 triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf1, primals_5, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = 
empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17) del primals_14 buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_gelu_7[grid(64)](buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22, primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_18 return buf23, primals_1, primals_11, primals_17, buf9, reinterpret_tensor( buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0 ), buf17, reinterpret_tensor(buf18, (16, 4), (4, 1), 0 ), buf20, primals_15, primals_13, primals_9 class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transform(self, x, linear_layer): bs, seq_len = x.shape[:2] proj = linear_layer(x) proj = proj.view(bs, seq_len, self.num_attention_heads, self. attention_head_size) proj = proj.transpose(1, 2) return proj def attention(self, key, query, value, attention_mask): S = torch.matmul(query, key.transpose(-1, -2) ) / self.attention_head_size ** (1 / 2) if attention_mask is not None: S += attention_mask S_p = F.softmax(S, dim=-1) S_p = self.dropout(S_p) context_layer = torch.matmul(S_p, value) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = context_layer.view(new_context_layer_shape) return context_layer def forward(self, hidden_states, attention_mask): """ hidden_states: [bs, seq_len, hidden_state] attention_mask: [bs, 1, 1, seq_len] output: [bs, seq_len, hidden_state] """ key_layer = self.transform(hidden_states, self.key) value_layer = self.transform(hidden_states, self.value) query_layer = self.transform(hidden_states, self.query) attn_value = self.attention(key_layer, query_layer, value_layer, attention_mask) return attn_value class BertLayerNew(nn.Module): def __init__(self, config): super().__init__() self.self_attention = BertSelfAttention(config) self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size ) self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps= config.layer_norm_eps) self.attention_dropout = nn.Dropout(config.hidden_dropout_prob) self.interm_dense = nn.Linear(config.hidden_size, config. intermediate_size) self.interm_af = F.gelu self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size ) self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.out_dropout = nn.Dropout(config.hidden_dropout_prob) def add_norm(self, input, output, dense_layer, dropout, ln_layer): """ input: the input output: the input that requires the sublayer to transform dense_layer, dropput: the sublayer ln_layer: layer norm that takes input+sublayer(output) #SAM NOTE Really? """ output = dropout(dense_layer(output)) output = ln_layer(input + output) return output def forward(self, input_0, input_1): primals_2 = self.self_attention.query.weight primals_3 = self.self_attention.query.bias primals_4 = self.self_attention.key.weight primals_5 = self.self_attention.key.bias primals_6 = self.self_attention.value.weight primals_7 = self.self_attention.value.bias primals_9 = self.attention_dense.weight primals_10 = self.attention_dense.bias primals_11 = self.attention_layer_norm.weight primals_12 = self.attention_layer_norm.bias primals_13 = self.interm_dense.weight primals_14 = self.interm_dense.bias primals_15 = self.out_dense.weight primals_16 = self.out_dense.bias primals_17 = self.out_layer_norm.weight primals_18 = self.out_layer_norm.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
SamarthMM/cs769-assignments
BertLayer
false
4736
[ "MIT" ]
0
bac2ad57c50043608276df8e0f21181ef62696c7
https://github.com/SamarthMM/cs769-assignments/tree/bac2ad57c50043608276df8e0f21181ef62696c7
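Illustrative only, not part of the record: running the eager BertLayer on CPU with the record's own mock config and input shapes. eval() disables the 0.5 dropouts, and an all-zero additive mask means every position may attend everywhere.

import torch

_, init_kwargs = get_init_inputs()        # {'config': _mock_config(...)} from the record
layer = BertLayer(**init_kwargs).eval()
hidden = torch.rand(4, 4, 4)              # [bs, seq_len, hidden_state]
mask = torch.zeros(4, 4, 4)               # additive attention mask; zeros = no masking
print(layer(hidden, mask).shape)          # torch.Size([4, 4, 4])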
Gate
import torch import torch.nn as nn from scipy.stats import entropy as entropy from scipy.spatial.distance import cosine as cosine class Gate(nn.Module): def __init__(self, hidden_size): super(Gate, self).__init__() self.transform = nn.Linear(hidden_size * 2, hidden_size) nn.init.kaiming_normal_(self.transform.weight) def forward(self, query, key): r = self.transform(torch.cat((query.expand(key.size(0), -1), key), -1)) gate = torch.sigmoid(r) return gate def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from scipy.stats import entropy as entropy from scipy.spatial.distance import cosine as cosine assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_sigmoid_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 return buf2, buf0, buf2 class GateNew(nn.Module): def __init__(self, hidden_size): super(GateNew, self).__init__() self.transform = nn.Linear(hidden_size * 2, hidden_size) nn.init.kaiming_normal_(self.transform.weight) def forward(self, input_0, input_1): primals_3 = self.transform.weight primals_4 = self.transform.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
yanda-wang/AMHSC
Gate
false
4737
[ "MIT" ]
0
9b0a48d1f0992ca3272e7089835a946c49d5f50d
https://github.com/yanda-wang/AMHSC/tree/9b0a48d1f0992ca3272e7089835a946c49d5f50d
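Illustrative usage, not part of the record: Gate expands a single query row across every key row and emits a per-key sigmoid gate, so each output entry lies in (0, 1). A CPU sketch assuming it runs next to the record's eager class.

import torch

gate = Gate(hidden_size=4)
query = torch.rand(1, 4)   # one query vector, broadcast via expand()
keys = torch.rand(6, 4)    # six candidate keys
g = gate(query, keys)
print(g.shape)             # torch.Size([6, 4])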
BertSelfAttention
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn import torch.utils.checkpoint class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states) mixed_value_layer = self.value(encoder_hidden_states) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self. max_position_embeddings - 1) positional_embedding = positional_embedding if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else ( context_layer,) return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, position_embedding_type=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, 
xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf9 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Hzfinfdu/Black-Box-Tuning
BertSelfAttention
false
4738
[ "MIT" ]
0
64eb5505875dc1b242c6f0a2a2f07e4000c24cb4
https://github.com/Hzfinfdu/Black-Box-Tuning/tree/64eb5505875dc1b242c6f0a2a2f07e4000c24cb4
Classifier3
import torch
import torch.nn
import torch.utils.data
import torch.nn.functional as F
import torch.nn.parallel


class Classifier3(torch.nn.Module):

    def __init__(self):
        super(Classifier3, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=64,
            kernel_size=3, stride=1, padding=1)
        self.conv2 = torch.nn.Conv2d(in_channels=64, out_channels=128,
            kernel_size=3, stride=1, padding=1)
        self.conv3 = torch.nn.Conv2d(in_channels=128, out_channels=256,
            kernel_size=3, stride=1, padding=1)
        self.pool = torch.nn.MaxPool2d(2, 2)
        self.fc1 = torch.nn.Linear(256 * 8 * 8, 1024)
        self.fc2 = torch.nn.Linear(1024, 512)
        self.fc3 = torch.nn.Linear(512, 3)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(-1, 256 * 8 * 8)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 
+ x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = yindex // 8 y5 = yindex y4 = yindex // 64 y6 = yindex % 64 tmp0 = tl.load(in_ptr0 + (x2 + 512 * y0 + 8192 * y1), xmask & ymask) tmp1 = tl.load(in_ptr0 + (256 + x2 + 512 * y0 + 8192 * y1), xmask & ymask) tmp7 = tl.load(in_ptr0 + (4096 + x2 + 512 * y0 + 8192 * y1), xmask & ymask) tmp12 = tl.load(in_ptr0 + (4352 + x2 + 512 * y0 + 8192 * y1), xmask & ymask ) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 256 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 64 * x2 + 16384 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (1024, 16384), (16384, 1)) assert_size_stride(primals_9, (1024,), (1,)) assert_size_stride(primals_10, (512, 1024), (1024, 1)) 
assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (3, 512), (512, 1)) assert_size_stride(primals_13, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_2[grid(8192, 9)](primals_4, buf2, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(32768, 9)](primals_6, buf3, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_4[grid(1048576)](buf5, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf6 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf7 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(262144)](buf5, buf6, buf7, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(524288)](buf9, primals_5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_7[grid(131072)](buf9, buf10, buf11, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_8[grid(262144)](buf13, primals_7, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf14 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) buf15 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_max_pool2d_with_indices_9[grid(256, 256)](buf13, buf14, buf15, 256, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) buf16 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (4, 16384), (16384, 1), 0), reinterpret_tensor(primals_8, (16384, 1024), (1, 16384), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_10[grid(4096)](buf17, primals_9, 4096, XBLOCK =256, num_warps=4, num_stages=1) del primals_9 buf18 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(buf17, reinterpret_tensor(primals_10, (1024, 512), (1, 1024), 0), out=buf18) buf19 = buf18 del buf18 
triton_poi_fused_relu_11[grid(2048)](buf19, primals_11, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf20 = empty_strided_cuda((4, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_13, buf19, reinterpret_tensor( primals_12, (512, 3), (1, 512), 0), alpha=1, beta=1, out=buf20) del primals_13 return (buf20, buf0, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, reinterpret_tensor(buf15, (4, 16384), (16384, 1), 0), buf17, buf19, primals_12, primals_10, primals_8) class Classifier3New(torch.nn.Module): def __init__(self): super(Classifier3New, self).__init__() self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1) self.conv2 = torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1) self.conv3 = torch.nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1) self.pool = torch.nn.MaxPool2d(2, 2) self.fc1 = torch.nn.Linear(256 * 8 * 8, 1024) self.fc2 = torch.nn.Linear(1024, 512) self.fc3 = torch.nn.Linear(512, 3) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_12 = self.fc3.weight primals_13 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
yuping1624/1082NCTU-Deep-Learning
Classifier3
false
4739
[ "MIT" ]
0
dc83e1c8709e9610a996f02091fe626f07b3c10f
https://github.com/yuping1624/1082NCTU-Deep-Learning/tree/dc83e1c8709e9610a996f02091fe626f07b3c10f
Net
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._utils


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5, stride=(2, 2))
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5, stride=(2, 2))
        self.fc1 = nn.Linear(16 * 13 * 13, 2048)
        self.fc2 = nn.Linear(2048, 512)
        self.fc3 = nn.Linear(512, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 13 * 13)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 432, 432])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1099104 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 45796 % 6 x0 = xindex % 45796 x4 = xindex // 45796 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 45824 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 274776 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 107 x1 = xindex // 107 % 107 x2 = xindex // 11449 x3 = xindex % 11449 tmp0 = tl.load(in_ptr0 + (2 * x0 + 428 * x1 + 45824 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 428 * x1 + 45824 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (214 + 2 * x0 + 428 * x1 + 45824 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (215 + 2 * x0 + 428 * x1 + 45824 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + 11456 * x2), tmp6, xmask) tl.store(out_ptr1 + (x3 + 11520 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 173056 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 2704 % 16 x0 = xindex % 2704 x4 = xindex // 2704 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 2720 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 26 x1 = xindex // 26 % 26 x4 = xindex // 676 x3 = xindex // 10816 x5 = xindex % 10816 x6 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 104 * x1 + 2720 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 104 * x1 + 2720 * x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (52 + 2 * x0 + 104 * x1 + 2720 * x4), xmask, eviction_policy='evict_last') tmp12 = 
tl.load(in_ptr0 + (53 + 2 * x0 + 104 * x1 + 2720 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x5 + 10880 * x3), tmp15, xmask) tl.store(out_ptr1 + x6, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 432, 432), (559872, 186624, 432, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (2048, 2704), (2704, 1)) assert_size_stride(primals_7, (2048,), (1,)) assert_size_stride(primals_8, (512, 2048), (2048, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (10, 512), (512, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 214, 214), (274776, 45796, 214, 1)) buf1 = empty_strided_cuda((4, 6, 214, 214), (274944, 45824, 214, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1099104)](buf0, primals_2, buf1, 1099104, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 6, 107, 107), (68736, 11456, 107, 1), torch.float32) buf3 = empty_strided_cuda((4, 6, 107, 107), (69120, 11520, 107, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(274776)](buf1, buf2, buf3, 274776, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 52, 52), (43264, 2704, 52, 1)) buf5 = empty_strided_cuda((4, 16, 52, 52), (43520, 2720, 52, 1), torch.float32) triton_poi_fused_convolution_relu_2[grid(173056)](buf4, primals_5, buf5, 173056, XBLOCK=512, num_warps=8, num_stages=1) del buf4 del 
primals_5 buf6 = empty_strided_cuda((4, 16, 26, 26), (10880, 676, 26, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 26, 26), (10816, 676, 26, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_3[grid(43264)](buf5, buf6, buf7, 43264, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (16, 2704), (2704, 1), 0 ), reinterpret_tensor(primals_6, (2704, 2048), (1, 2704), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(32768)](buf9, primals_7, 32768, XBLOCK =256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (2048, 512), (1, 2048), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(8192)](buf11, primals_9, 8192, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((16, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (512, 10), (1, 512), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (16, 2704), (2704, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5, stride=(2, 2)) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5, stride=(2, 2)) self.fc1 = nn.Linear(16 * 13 * 13, 2048) self.fc2 = nn.Linear(2048, 512) self.fc3 = nn.Linear(512, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
zyouc518/crow
Net
false
4740
[ "Apache-2.0" ]
0
e3fe92e329649fb82b3fef6c0ab5b732f1918900
https://github.com/zyouc518/crow/tree/e3fe92e329649fb82b3fef6c0ab5b732f1918900
CrossEntropyLoss
import torch
import torch.utils.cpp_extension


class CrossEntropyLoss(torch.nn.Module):

    def __init__(self):
        super(CrossEntropyLoss, self).__init__()
        self.ce_loss = torch.nn.CrossEntropyLoss()

    def forward(self, cls_output, label, **_):
        return self.ce_loss(cls_output, label).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_per_fused__log_softmax_div_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r3 = rindex
    r0 = rindex % 16
    r2 = rindex // 64
    tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + r3, None)
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tmp15 = tmp13 * tmp14
    tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
    tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
    tmp19 = -tmp18
    tmp20 = 0.015625
    tmp21 = tmp19 * tmp20
    tmp22 = 1.0
    tmp23 = tmp21 / tmp22
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused__log_softmax_div_mean_mul_neg_sum_1[grid(1)](buf2,
            buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del buf0
    return buf2,


class CrossEntropyLossNew(torch.nn.Module):

    def __init__(self):
        super(CrossEntropyLossNew, self).__init__()
        self.ce_loss = torch.nn.CrossEntropyLoss()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
yingnengd/MyGAN
CrossEntropyLoss
false
4741
[ "MIT" ]
0
6e4abbe165c8f3b1e1b69d5d01177712761a3a1c
https://github.com/yingnengd/MyGAN/tree/6e4abbe165c8f3b1e1b69d5d01177712761a3a1c