Dataset schema (column, type, min/max statistics as reported by the dataset viewer):

entry_point                   string   length 1 to 65
original_triton_python_code   string   length 208 to 619k
optimised_triton_code         string   length 1.15k to 275k
repo_name                     string   length 7 to 115
module_name                   string   length 1 to 65
synthetic                     bool     1 class
uuid                          int64    0 to 18.5k
licenses                      list     length 1 to 6
stars                         int64    0 to 19.8k
sha                           string   length 40 (fixed)
repo_link                     string   length 72 to 180
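Each record pairs a reference PyTorch module (original_triton_python_code) with an Inductor-generated Triton equivalent (optimised_triton_code) that defines a drop-in wrapper class named <module_name>New; both code strings also define get_inputs() and get_init_inputs() helpers. A minimal cross-checking sketch, assuming only that some loader (not shown) has pulled the two code strings and the module name out of a record, and that a CUDA device is available, since the optimised code drives raw CUDA streams:

import torch

def check_record(original_code: str, optimised_code: str, module_name: str) -> bool:
    """Exec both code strings of one record and compare outputs on its own sample inputs."""
    orig_ns, opt_ns = {}, {}
    exec(original_code, orig_ns)   # defines <module_name>, get_inputs, get_init_inputs
    exec(optimised_code, opt_ns)   # defines <module_name>New and its call() wrapper
    init_args, init_kwargs = orig_ns['get_init_inputs']()
    inputs = [t.cuda() for t in orig_ns['get_inputs']()]
    ref_mod = orig_ns[module_name](*init_args, **init_kwargs).cuda()
    new_mod = opt_ns[module_name + 'New'](*init_args, **init_kwargs).cuda()
    new_mod.load_state_dict(ref_mod.state_dict())  # both wrappers hold identical parameters
    return torch.allclose(ref_mod(*inputs), new_mod(*inputs), rtol=1e-4, atol=1e-5)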
Sample rows:

entry_point: MultiscaleRecLoss

original_triton_python_code:

import torch
import torch.nn as nn


class MultiscaleRecLoss(nn.Module):

    def __init__(self, scale=3, rec_loss_type='l1', multiscale=True):
        super(MultiscaleRecLoss, self).__init__()
        self.multiscale = multiscale
        if rec_loss_type == 'l1':
            self.criterion = nn.L1Loss()
        elif rec_loss_type == 'smoothl1':
            self.criterion = nn.SmoothL1Loss()
        elif rec_loss_type == 'l2':
            self.criterion = nn.MSELoss()
        else:
            raise NotImplementedError('Loss [{}] is not implemented'.format(rec_loss_type))
        self.downsample = nn.AvgPool2d(2, stride=2, count_include_pad=False)
        if self.multiscale:
            self.weights = [1.0, 1.0 / 2, 1.0 / 4]
            self.weights = self.weights[:scale]

    def forward(self, input, target):
        loss = 0
        pred = input.clone()
        gt = target.clone()
        if self.multiscale:
            for i in range(len(self.weights)):
                loss += self.weights[i] * self.criterion(pred, gt)
                if i != len(self.weights) - 1:
                    pred = self.downsample(pred)
                    gt = self.downsample(gt)
        else:
            loss = self.criterion(pred, gt)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)


@triton.jit
def triton_per_fused_abs_avg_pool2d_mean_sub_1(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 2
    r1 = rindex // 2
    r2 = rindex
    tmp0 = tl.load(in_ptr0 + (2 * r0 + 8 * r1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * r0 + 8 * r1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + 2 * r0 + 8 * r1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (5 + 2 * r0 + 8 * r1), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (2 * r0 + 8 * r1), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 2 * r0 + 8 * r1), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (4 + 2 * r0 + 8 * r1), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (5 + 2 * r0 + 8 * r1), None, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tmp11 = tmp10 + tmp9
    tmp13 = tmp12 + tmp11
    tmp15 = tmp14 + tmp13
    tmp16 = tmp15 * tmp7
    tmp17 = tmp8 - tmp16
    tmp18 = tl_math.abs(tmp17)
    tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
    tmp21 = tl.sum(tmp19, 1)[:, None]
    tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp8, None)
    tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, None)
    tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)


@triton.jit
def triton_per_fused_abs_add_avg_pool2d_mean_mul_sub_2(in_out_ptr0, in_ptr0,
        in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp22 = tl.load(in_out_ptr0 + 0)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK, 1])
    tmp30 = tl.load(in_ptr2 + 0)
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK, 1])
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tmp11 = tmp10 + tmp9
    tmp13 = tmp12 + tmp11
    tmp15 = tmp14 + tmp13
    tmp16 = tmp15 * tmp7
    tmp17 = tmp8 - tmp16
    tmp18 = tl_math.abs(tmp17)
    tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
    tmp21 = tl.sum(tmp19, 1)[:, None]
    tmp24 = 256.0
    tmp25 = tmp23 / tmp24
    tmp26 = 1.0
    tmp27 = tmp25 * tmp26
    tmp28 = 0.0
    tmp29 = tmp27 + tmp28
    tmp32 = 64.0
    tmp33 = tmp31 / tmp32
    tmp34 = 0.5
    tmp35 = tmp33 * tmp34
    tmp36 = tmp29 + tmp35
    tmp37 = 16.0
    tmp38 = tmp21 / tmp37
    tmp39 = tmp38 * tmp7
    tmp40 = tmp36 + tmp39
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp40, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_abs_mean_sub_0[grid(1)](arg0_1, arg1_1, buf0, 1,
            256, num_warps=2, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        buf3 = empty_strided_cuda((), (), torch.float32)
        triton_per_fused_abs_avg_pool2d_mean_sub_1[grid(1)](arg0_1, arg1_1,
            buf1, buf2, buf3, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf5 = buf0
        del buf0
        triton_per_fused_abs_add_avg_pool2d_mean_mul_sub_2[grid(1)](buf5,
            buf1, buf2, buf3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf1
        del buf2
        del buf3
    return buf5,


class MultiscaleRecLossNew(nn.Module):

    def __init__(self, scale=3, rec_loss_type='l1', multiscale=True):
        super(MultiscaleRecLossNew, self).__init__()
        self.multiscale = multiscale
        if rec_loss_type == 'l1':
            self.criterion = nn.L1Loss()
        elif rec_loss_type == 'smoothl1':
            self.criterion = nn.SmoothL1Loss()
        elif rec_loss_type == 'l2':
            self.criterion = nn.MSELoss()
        else:
            raise NotImplementedError('Loss [{}] is not implemented'.format(rec_loss_type))
        self.downsample = nn.AvgPool2d(2, stride=2, count_include_pad=False)
        if self.multiscale:
            self.weights = [1.0, 1.0 / 2, 1.0 / 4]
            self.weights = self.weights[:scale]

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
repo_name: eezkni/UEGAN
module_name: MultiscaleRecLoss
synthetic: false
uuid: 15288
licenses: ["MIT"]
stars: 73
sha: a6616ac559819d487cae0f301d98cf2922a11a09
repo_link: https://github.com/eezkni/UEGAN/tree/a6616ac559819d487cae0f301d98cf2922a11a09
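As an orientation aid for the record above (an editorial sketch, not part of the dataset row): the constants 256.0, 64.0 and 16.0 baked into the final fused kernel are the element counts of the 4x4x4x4 input and its two average-pooled scales, and the factors 1.0, 0.5 and 0.25 are self.weights, so buf5 ends up holding the same weighted sum as this plain PyTorch restatement:

import torch
import torch.nn.functional as F

def multiscale_l1_reference(pred, gt, weights=(1.0, 0.5, 0.25)):
    # mean() divides by 256, 64 and 16 in turn -- the constants in the kernels above
    loss = 0.0
    for i, w in enumerate(weights):
        loss = loss + w * (pred - gt).abs().mean()
        if i != len(weights) - 1:
            pred = F.avg_pool2d(pred, 2, stride=2, count_include_pad=False)
            gt = F.avg_pool2d(gt, 2, stride=2, count_include_pad=False)
    return loss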
entry_point: FocalLoss

original_triton_python_code:

import torch


def _neg_loss(pred, gt):
    """Modified focal loss. Exactly the same as CornerNet.
    Runs faster and costs a little bit more memory
    (https://github.com/tianweiy/CenterPoint)
    Arguments:
        pred (batch x c x h x w)
        gt (batch x c x h x w)
    """
    pos_inds = gt.eq(1).float()
    neg_inds = gt.lt(1).float()
    neg_weights = torch.pow(1 - gt, 4)
    pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
    neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
    return -(pos_loss + neg_loss)


class FocalLoss(torch.nn.Module):
    """nn.Module wrapper for focal loss"""

    def __init__(self):
        super(FocalLoss, self).__init__()
        self.neg_loss = _neg_loss

    def forward(self, out, target):
        return self.neg_loss(out, target)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy_add_eq_log_lt_mul_neg_pow_rsub_0(in_ptr0,
        in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp6 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tl_math.log(tmp0)
    tmp2 = 1.0
    tmp3 = tmp2 - tmp0
    tmp4 = tmp3 * tmp3
    tmp5 = tmp1 * tmp4
    tmp7 = tmp6 == tmp2
    tmp8 = tmp7.to(tl.float32)
    tmp9 = tmp5 * tmp8
    tmp10 = tl_math.log(tmp3)
    tmp11 = tmp0 * tmp0
    tmp12 = tmp10 * tmp11
    tmp13 = tmp2 - tmp6
    tmp14 = tmp13 * tmp13
    tmp15 = tmp14 * tmp14
    tmp16 = tmp12 * tmp15
    tmp17 = tmp6 < tmp2
    tmp18 = tmp17.to(tl.float32)
    tmp19 = tmp16 * tmp18
    tmp20 = tmp9 + tmp19
    tmp21 = -tmp20
    tl.store(out_ptr0 + x0, tmp21, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__to_copy_add_eq_log_lt_mul_neg_pow_rsub_0[grid(256)](
            arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


def _neg_loss(pred, gt):
    """Modified focal loss. Exactly the same as CornerNet.
    Runs faster and costs a little bit more memory
    (https://github.com/tianweiy/CenterPoint)
    Arguments:
        pred (batch x c x h x w)
        gt (batch x c x h x w)
    """
    pos_inds = gt.eq(1).float()
    neg_inds = gt.lt(1).float()
    neg_weights = torch.pow(1 - gt, 4)
    pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
    neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
    return -(pos_loss + neg_loss)


class FocalLossNew(torch.nn.Module):
    """nn.Module wrapper for focal loss"""

    def __init__(self):
        super(FocalLossNew, self).__init__()
        self.neg_loss = _neg_loss

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
repo_name: edwardzhou130/Panoptic-PolarNet
module_name: FocalLoss
synthetic: false
uuid: 15289
licenses: ["BSD-3-Clause"]
stars: 90
sha: 3a72f2380a4e505e191b69da596f521a9d9f1a71
repo_link: https://github.com/edwardzhou130/Panoptic-PolarNet/tree/3a72f2380a4e505e191b69da596f521a9d9f1a71
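Similarly for the record above (again an editorial sketch, not dataset content): the single fused kernel is an element-wise transcription of _neg_loss, computing (1 - gt)^4 as ((1 - gt)^2)^2 in tmp14/tmp15. A plain restatement for cross-checking:

import torch

def focal_reference(pred, gt):
    # matches the fused kernel: -(log(p) * (1-p)**2 * [gt==1] + log(1-p) * p**2 * (1-gt)**4 * [gt<1])
    pos = torch.log(pred) * (1 - pred) ** 2 * (gt == 1).float()
    neg = torch.log(1 - pred) * pred ** 2 * (1 - gt) ** 4 * (gt < 1).float()
    return -(pos + neg)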
entry_point: Sine

original_triton_python_code:

import torch
from torch import nn


class Sine(nn.Module):

    def __init__(self, w0=30):
        super().__init__()
        self.w0 = w0

    def forward(self, input):
        return torch.sin(self.w0 * input)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 30.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl_math.sin(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sin_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SineNew(nn.Module):

    def __init__(self, w0=30):
        super().__init__()
        self.w0 = w0

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: eliemichel/ACORN
module_name: Sine
synthetic: false
uuid: 15290
licenses: ["MIT"]
stars: 186
sha: ca1b776e585251bd20468038c343decbbd62abf3
repo_link: https://github.com/eliemichel/ACORN/tree/ca1b776e585251bd20468038c343decbbd62abf3
entry_point: CoPredictor

original_triton_python_code:

import torch
import torch.autograd
import torch.nn as nn


class Biaffine(nn.Module):

    def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
        super(Biaffine, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.bias_x = bias_x
        self.bias_y = bias_y
        weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
        nn.init.xavier_normal_(weight)
        self.weight = nn.Parameter(weight, requires_grad=True)

    def extra_repr(self):
        s = f'n_in={self.n_in}, n_out={self.n_out}'
        if self.bias_x:
            s += f', bias_x={self.bias_x}'
        if self.bias_y:
            s += f', bias_y={self.bias_y}'
        return s

    def forward(self, x, y):
        if self.bias_x:
            x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
        if self.bias_y:
            y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
        s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
        s = s.permute(0, 2, 3, 1)
        return s


class MLP(nn.Module):

    def __init__(self, n_in, n_out, dropout=0):
        super().__init__()
        self.linear = nn.Linear(n_in, n_out)
        self.activation = nn.GELU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self.dropout(x)
        x = self.linear(x)
        x = self.activation(x)
        return x


class CoPredictor(nn.Module):

    def __init__(self, cls_num, hid_size, biaffine_size, channels,
            ffnn_hid_size, dropout=0):
        super().__init__()
        self.mlp1 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
        self.mlp2 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
        self.biaffine = Biaffine(n_in=biaffine_size, n_out=cls_num,
            bias_x=True, bias_y=True)
        self.mlp_rel = MLP(channels, ffnn_hid_size, dropout=dropout)
        self.linear = nn.Linear(ffnn_hid_size, cls_num)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, y, z):
        h = self.dropout(self.mlp1(x))
        t = self.dropout(self.mlp2(y))
        o1 = self.biaffine(h, t)
        z = self.dropout(self.mlp_rel(z))
        o2 = self.linear(z)
        return o1 + o2


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'cls_num': 4, 'hid_size': 4, 'biaffine_size': 4,
        'channels': 4, 'ffnn_hid_size': 4}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = 0.5
    tmp7 = tmp5 * tmp6
    tmp8 = 0.7071067811865476
    tmp9 = tmp5 * tmp8
    tmp10 = libdevice.erf(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp7 * tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp4, tmp13, tmp14)
    tmp16 = tmp0 >= tmp3
    tl.full([1], 5, tl.int64)
    tmp19 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp20 = tl.where(tmp16, tmp11, tmp19)
    tmp21 = tl.where(tmp4, tmp15, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 100
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5 % 4
    x2 = xindex // 20
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 5 * x2 + 25 * x1), xmask)
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 20
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex % 4
    x3 = xindex // 4
    y0 = yindex % 5
    y1 = yindex // 5
    x5 = xindex
    y4 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 5 * x3 + 20 * x2 + 80 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    x5 = xindex // 4 % 16
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + (x5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x4, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 5, 5), (25, 5, 1))
    assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(primals_4, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
        del primals_5
        del primals_6
        buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(80)](buf0, buf2, 80, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((5, 4, 1, 5, 1, 1), (20, 5, 5, 1, 1, 1),
            torch.float32)
        triton_poi_fused_clone_1[grid(100)](primals_7, buf3, 100,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
        buf4 = empty_strided_cuda((1, 16, 20), (320, 20, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 5), (0, 5, 1),
            0), reinterpret_tensor(buf3, (1, 5, 20), (0, 20, 1), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
        triton_poi_fused_cat_0[grid(80)](buf1, buf5, 80, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf6 = empty_strided_cuda((4, 5, 4, 4, 1, 1), (80, 16, 4, 1, 1, 1),
            torch.float32)
        triton_poi_fused_clone_2[grid(20, 16)](buf4, buf6, 20, 16,
            XBLOCK=16, YBLOCK=32, num_warps=4, num_stages=1)
        del buf4
        buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        extern_kernels.bmm(buf5, reinterpret_tensor(buf6, (4, 5, 16), (80,
            16, 1), 0), out=buf7)
        buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_10, reinterpret_tensor(primals_8, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf8)
        del primals_10
        del primals_9
        buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_gelu_3[grid(64)](buf8, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf10)
        buf11 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 1, 16, 4), 0)
        del buf7
        triton_poi_fused_add_4[grid(256)](buf11, buf10, primals_12, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf10
        del primals_12
    return (buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf0,
        reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf1,
        reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf8,
        reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_11,
        reinterpret_tensor(buf5, (4, 5, 4), (20, 1, 5), 0),
        reinterpret_tensor(buf6, (4, 16, 5), (80, 1, 16), 0),
        reinterpret_tensor(buf2, (1, 5, 16), (80, 1, 5), 0),
        reinterpret_tensor(buf3, (1, 20, 5), (100, 1, 20), 0))


class Biaffine(nn.Module):

    def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
        super(Biaffine, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.bias_x = bias_x
        self.bias_y = bias_y
        weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
        nn.init.xavier_normal_(weight)
        self.weight = nn.Parameter(weight, requires_grad=True)

    def extra_repr(self):
        s = f'n_in={self.n_in}, n_out={self.n_out}'
        if self.bias_x:
            s += f', bias_x={self.bias_x}'
        if self.bias_y:
            s += f', bias_y={self.bias_y}'
        return s

    def forward(self, x, y):
        if self.bias_x:
            x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
        if self.bias_y:
            y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
        s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
        s = s.permute(0, 2, 3, 1)
        return s


class MLP(nn.Module):

    def __init__(self, n_in, n_out, dropout=0):
        super().__init__()
        self.linear = nn.Linear(n_in, n_out)
        self.activation = nn.GELU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self.dropout(x)
        x = self.linear(x)
        x = self.activation(x)
        return x


class CoPredictorNew(nn.Module):

    def __init__(self, cls_num, hid_size, biaffine_size, channels,
            ffnn_hid_size, dropout=0):
        super().__init__()
        self.mlp1 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
        self.mlp2 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
        self.biaffine = Biaffine(n_in=biaffine_size, n_out=cls_num,
            bias_x=True, bias_y=True)
        self.mlp_rel = MLP(channels, ffnn_hid_size, dropout=dropout)
        self.linear = nn.Linear(ffnn_hid_size, cls_num)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.mlp1.linear.weight
        primals_3 = self.mlp1.linear.bias
        primals_5 = self.mlp2.linear.weight
        primals_6 = self.mlp2.linear.bias
        primals_7 = self.biaffine.weight
        primals_9 = self.mlp_rel.linear.weight
        primals_10 = self.mlp_rel.linear.bias
        primals_11 = self.linear.weight
        primals_12 = self.linear.bias
        primals_1 = input_0
        primals_4 = input_1
        primals_8 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12])
        return output[0]
repo_name: dumpmemory/W2NER
module_name: CoPredictor
synthetic: false
uuid: 15291
licenses: ["MIT"]
stars: 128
sha: fb1b6eb1111eb001b1c965097d995244b840bdda
repo_link: https://github.com/dumpmemory/W2NER/tree/fb1b6eb1111eb001b1c965097d995244b840bdda
entry_point: ConvBlock

original_triton_python_code:

import math
import torch
from torch import Tensor
from typing import List
from typing import Optional
from typing import Union
from typing import Any
from typing import Tuple
from typing import NamedTuple
import torch.nn as nn
import torch.nn.functional as F


class PaddedTensor(NamedTuple):
    data: 'torch.Tensor'
    sizes: 'torch.Tensor'

    @classmethod
    def build(cls, data: 'torch.Tensor', sizes: 'torch.Tensor'):
        assert isinstance(data, torch.Tensor)
        assert isinstance(sizes, torch.Tensor)
        assert sizes.dim() == 2, 'PaddedTensor.sizes must have 2 dimensions'
        assert sizes.size(1) in (2, 3), (
            f'PaddedTensor.sizes is incorrect: expected=2 (HxW) or 3 (CxHxW), found={sizes.size(1)}')
        assert data.size(0) == sizes.size(0), (
            f'Batch size {sizes.size(0)} does not match the number of samples in the batch {data.size(0)}')
        return cls(data, sizes)

    def __repr__(self) -> str:
        return (
            f'PaddedTensor(data.size()={list(self.data.size())}, sizes={self.sizes.tolist()}, device={str(self.data.device)})')

    @property
    def device(self) -> torch.device:
        return self.data.device


class ConvBlock(nn.Module):

    def __init__(self, in_channels: 'int', out_channels: 'int',
            kernel_size: 'Param2d'=3, stride: 'Param2d'=1,
            dilation: 'Param2d'=1, activation: 'Optional[nn.Module]'=nn.LeakyReLU,
            poolsize: 'Param2d'=0, dropout: 'Optional[float]'=None,
            batchnorm: 'bool'=False, inplace: 'bool'=False,
            use_masks: 'bool'=False) -> None:
        super().__init__()
        ks, st, di, ps = ConvBlock.prepare_dimensional_args(kernel_size,
            stride, dilation, poolsize)
        if ps[0] * ps[1] < 2:
            ps = None
        self.dropout = dropout
        self.in_channels = in_channels
        self.use_masks = use_masks
        self.poolsize = ps
        self.conv = nn.Conv2d(in_channels, out_channels, ks, stride=st,
            padding=tuple((ks[dim] - 1) // 2 * di[dim] for dim in (0, 1)),
            dilation=di, bias=not batchnorm)
        self.batchnorm = nn.BatchNorm2d(out_channels) if batchnorm else None
        self.activation = activation(inplace=inplace) if activation else None
        self.pool = nn.MaxPool2d(ps) if self.poolsize else None

    @staticmethod
    def prepare_dimensional_args(*args: Any, dims: int=2) -> List[Tuple]:
        return [(tuple(arg) if isinstance(arg, (list, tuple)) else (arg,) *
            dims) for arg in args]

    def forward(self, x: 'Union[Tensor, PaddedTensor]') -> Union[Tensor, PaddedTensor]:
        x, xs = (x.data, x.sizes) if isinstance(x, PaddedTensor) else (x, None)
        assert x.size(1) == self.in_channels, (
            f'Input image depth ({x.size(1)}) does not match the expected ({self.in_channels})')
        if self.dropout and 0.0 < self.dropout < 1.0:
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv(x)
        if self.use_masks:
            x = mask_image_from_size(x, batch_sizes=xs, mask_value=0)
        if self.batchnorm:
            x = self.batchnorm(x)
        if self.activation:
            x = self.activation(x)
        if self.use_masks:
            x = mask_image_from_size(x, batch_sizes=xs, mask_value=0)
        if self.pool:
            x = self.pool(x)
        return x if xs is None else PaddedTensor.build(x,
            self.get_batch_output_size(xs))

    def get_batch_output_size(self, xs: 'torch.Tensor') -> torch.Tensor:
        ys = torch.zeros_like(xs)
        for dim in (0, 1):
            ys[:, dim] = self.get_output_size(size=xs[:, dim],
                kernel_size=self.conv.kernel_size[dim],
                dilation=self.conv.dilation[dim],
                stride=self.conv.stride[dim],
                poolsize=self.poolsize[dim] if self.poolsize else None,
                padding=self.conv.padding[dim])
        return ys

    @staticmethod
    def get_output_size(size: 'Union[torch.Tensor, int]', kernel_size: 'int',
            dilation: 'int', stride: 'int', poolsize: 'int',
            padding: 'Optional[int]'=None) -> Union[torch.LongTensor, int]:
        if padding is None:
            padding = (kernel_size - 1) // 2 * dilation
        size = size.float() if isinstance(size, torch.Tensor) else float(size)
        size = (size + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1
        size = size.floor() if isinstance(size, torch.Tensor) else math.floor(size)
        if poolsize:
            size /= poolsize
        return size.floor().long() if isinstance(size, torch.Tensor) else int(math.floor(size))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from typing import List
from typing import Optional
from typing import Union
from typing import Any
from typing import Tuple
from typing import NamedTuple
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0,
            primals_3, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_3
    return buf2, primals_1, primals_2, buf1


class PaddedTensor(NamedTuple):
    data: 'torch.Tensor'
    sizes: 'torch.Tensor'

    @classmethod
    def build(cls, data: 'torch.Tensor', sizes: 'torch.Tensor'):
        assert isinstance(data, torch.Tensor)
        assert isinstance(sizes, torch.Tensor)
        assert sizes.dim() == 2, 'PaddedTensor.sizes must have 2 dimensions'
        assert sizes.size(1) in (2, 3), (
            f'PaddedTensor.sizes is incorrect: expected=2 (HxW) or 3 (CxHxW), found={sizes.size(1)}')
        assert data.size(0) == sizes.size(0), (
            f'Batch size {sizes.size(0)} does not match the number of samples in the batch {data.size(0)}')
        return cls(data, sizes)

    def __repr__(self) -> str:
        return (
            f'PaddedTensor(data.size()={list(self.data.size())}, sizes={self.sizes.tolist()}, device={str(self.data.device)})')

    @property
    def device(self) -> torch.device:
        return self.data.device


class ConvBlockNew(nn.Module):

    def __init__(self, in_channels: 'int', out_channels: 'int',
            kernel_size: 'Param2d'=3, stride: 'Param2d'=1,
            dilation: 'Param2d'=1, activation: 'Optional[nn.Module]'=nn.LeakyReLU,
            poolsize: 'Param2d'=0, dropout: 'Optional[float]'=None,
            batchnorm: 'bool'=False, inplace: 'bool'=False,
            use_masks: 'bool'=False) -> None:
        super().__init__()
        ks, st, di, ps = ConvBlockNew.prepare_dimensional_args(kernel_size,
            stride, dilation, poolsize)
        if ps[0] * ps[1] < 2:
            ps = None
        self.dropout = dropout
        self.in_channels = in_channels
        self.use_masks = use_masks
        self.poolsize = ps
        self.conv = nn.Conv2d(in_channels, out_channels, ks, stride=st,
            padding=tuple((ks[dim] - 1) // 2 * di[dim] for dim in (0, 1)),
            dilation=di, bias=not batchnorm)
        self.batchnorm = nn.BatchNorm2d(out_channels) if batchnorm else None
        self.activation = activation(inplace=inplace) if activation else None
        self.pool = nn.MaxPool2d(ps) if self.poolsize else None

    @staticmethod
    def prepare_dimensional_args(*args: Any, dims: int=2) -> List[Tuple]:
        return [(tuple(arg) if isinstance(arg, (list, tuple)) else (arg,) *
            dims) for arg in args]

    def get_batch_output_size(self, xs: 'torch.Tensor') -> torch.Tensor:
        ys = torch.zeros_like(xs)
        for dim in (0, 1):
            ys[:, dim] = self.get_output_size(size=xs[:, dim],
                kernel_size=self.conv.kernel_size[dim],
                dilation=self.conv.dilation[dim],
                stride=self.conv.stride[dim],
                poolsize=self.poolsize[dim] if self.poolsize else None,
                padding=self.conv.padding[dim])
        return ys

    @staticmethod
    def get_output_size(size: 'Union[torch.Tensor, int]', kernel_size: 'int',
            dilation: 'int', stride: 'int', poolsize: 'int',
            padding: 'Optional[int]'=None) -> Union[torch.LongTensor, int]:
        if padding is None:
            padding = (kernel_size - 1) // 2 * dilation
        size = size.float() if isinstance(size, torch.Tensor) else float(size)
        size = (size + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1
        size = size.floor() if isinstance(size, torch.Tensor) else math.floor(size)
        if poolsize:
            size /= poolsize
        return size.floor().long() if isinstance(size, torch.Tensor) else int(math.floor(size))

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
repo_name: eivtho/PyLaia
module_name: ConvBlock
synthetic: false
uuid: 15292
licenses: ["MIT"]
stars: 89
sha: 2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
repo_link: https://github.com/eivtho/PyLaia/tree/2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
entry_point: NNAttention

original_triton_python_code:

import torch
import torch.nn as nn


class NNAttention(nn.Module):

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.q_net = nn.Linear(in_dim, out_dim)
        self.k_net = nn.Linear(in_dim, out_dim)
        self.v_net = nn.Linear(in_dim, out_dim)

    def forward(self, Q, K, V):
        q = self.q_net(Q)
        k = self.k_net(K)
        v = self.v_net(V)
        attn = torch.einsum('ijk,ilk->ijl', q, k)
        attn = attn
        attn_prob = torch.softmax(attn, dim=-1)
        attn_vec = torch.einsum('ijk,ikl->ijl', attn_prob, v)
        return attn_vec


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'out_dim': 4}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_7
        del primals_8
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf5 = buf3
        del buf3
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = buf4
        del buf4
        extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 4,
            1), 0), out=buf6)
    return (buf6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf5,
        reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0))


class NNAttentionNew(nn.Module):

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.q_net = nn.Linear(in_dim, out_dim)
        self.k_net = nn.Linear(in_dim, out_dim)
        self.v_net = nn.Linear(in_dim, out_dim)

    def forward(self, input_0, input_1, input_2):
        primals_1 = self.q_net.weight
        primals_2 = self.q_net.bias
        primals_4 = self.k_net.weight
        primals_5 = self.k_net.bias
        primals_7 = self.v_net.weight
        primals_8 = self.v_net.bias
        primals_3 = input_0
        primals_6 = input_1
        primals_9 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
repo_name: eitin-infant/FinRL-Meta
module_name: NNAttention
synthetic: false
uuid: 15293
licenses: ["MIT"]
stars: 214
sha: 4c94011e58425796e7e2e5c1bf848afd65c828d6
repo_link: https://github.com/eitin-infant/FinRL-Meta/tree/4c94011e58425796e7e2e5c1bf848afd65c828d6
entry_point: LayerNorm

original_triton_python_code:

import torch
import torch.fft
import torch.nn
import torch.nn as nn


class LayerNorm(nn.Module):

    def __init__(self, num_channels: 'int', eps: 'float'=1e-12):
        """Uses GroupNorm implementation with group=1 for speed."""
        super().__init__()
        self.layer_norm = torch.nn.GroupNorm(1, num_channels=num_channels,
            eps=eps)

    def forward(self, x):
        return self.layer_norm(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channels': 4}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.fft
import torch.nn
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    r3 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = tmp0 - tmp10
    tmp18 = 64.0
    tmp19 = tmp16 / tmp18
    tmp20 = 1e-12
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)
    tmp23 = tmp17 * tmp22
    tmp25 = tmp23 * tmp24
    tmp27 = tmp25 + tmp26
    tl.store(out_ptr2 + (r1 + 64 * x0), tmp27, xmask)
    tl.store(out_ptr3 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        get_raw_stream(0)
        triton_per_fused_native_group_norm_0[grid(4)](primals_3, primals_1,
            primals_2, buf0, buf3, buf4, 4, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del primals_1
        del primals_2
    return (buf3, primals_3, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1),
        0), reinterpret_tensor(buf4, (4, 1, 1), (1, 1, 1), 0))


class LayerNormNew(nn.Module):

    def __init__(self, num_channels: 'int', eps: 'float'=1e-12):
        """Uses GroupNorm implementation with group=1 for speed."""
        super().__init__()
        self.layer_norm = torch.nn.GroupNorm(1, num_channels=num_channels,
            eps=eps)

    def forward(self, input_0):
        primals_1 = self.layer_norm.weight
        primals_2 = self.layer_norm.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
repo_name: dwromero/ckconv
module_name: LayerNorm
synthetic: false
uuid: 15294
licenses: ["MIT"]
stars: 74
sha: d44c6441a98792477d6259368c210089bb33fe7a
repo_link: https://github.com/dwromero/ckconv/tree/d44c6441a98792477d6259368c210089bb33fe7a
entry_point: SelfAttention

original_triton_python_code:

import torch
import torch.nn as nn


class SelfAttention(nn.Module):

    def __init__(self, *args, **kargs):
        super().__init__()
        self.attention = nn.MultiheadAttention(*args, **kargs)

    def forward(self, x):
        return self.attention(x, x, x)[0]


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'embed_dim': 4, 'num_heads': 4}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (12, 4), (4, 1))
    assert_size_stride(primals_3, (12,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
            alpha=1, beta=1, out=buf1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
            alpha=1, beta=1, out=buf2)
        del primals_2
        buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_3
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
            4), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = buf4
        del buf4
        triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
            1), 0), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
        del buf7
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
            1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf9)
        del primals_5
    return (buf9, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1),
        0), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0),
        reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0),
        reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))


class SelfAttentionNew(nn.Module):

    def __init__(self, *args, **kargs):
        super().__init__()
        self.attention = nn.MultiheadAttention(*args, **kargs)

    def forward(self, input_0):
        primals_2 = self.attention.in_proj_weight
        primals_3 = self.attention.in_proj_bias
        primals_1 = self.attention.out_proj.weight
        primals_5 = self.attention.out_proj.bias
        primals_4 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
repo_name: eitin-infant/FinRL-Meta
module_name: SelfAttention
synthetic: false
uuid: 15295
licenses: ["MIT"]
stars: 214
sha: 4c94011e58425796e7e2e5c1bf848afd65c828d6
repo_link: https://github.com/eitin-infant/FinRL-Meta/tree/4c94011e58425796e7e2e5c1bf848afd65c828d6
entry_point: ILN

original_triton_python_code:

import torch
import torch.nn as nn
import torch.utils.cpp_extension


class ILN(nn.Module):

    def __init__(self, channels, resl, eps=1e-08):
        super().__init__()
        self.rho = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.rho.data.fill_(0.0)
        self.instance_norm = nn.InstanceNorm2d(channels, eps=eps, affine=False)
        self.layer_norm = nn.LayerNorm((channels, resl, resl), eps=eps,
            elementwise_affine=False)
        self.gamma = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.beta = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.gamma.data.fill_(1.0)
        self.beta.data.fill_(0.0)

    def forward(self, x):
        i_norm = self.instance_norm(x)
        l_norm = self.layer_norm(x)
        out = i_norm * self.rho.expand(x.size(0), -1, -1, -1) + l_norm * (1 -
            self.rho.expand(x.size(0), -1, -1, -1))
        out = out * self.gamma.expand(x.size(0), -1, -1, -1
            ) + self.beta.expand(x.size(0), -1, -1, -1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'resl': 4}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.cpp_extension

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, out_ptr0,
        xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 64.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-08
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.rsqrt(tmp20)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)


@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1(
        in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
        out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    x2 = xindex % 4
    x3 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 16.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-08
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.rsqrt(tmp20)
    tmp22 = tmp0 - tmp10
    tmp23 = tmp22 * tmp21
    tmp25 = tmp23 * tmp24
    tmp27 = tmp0 - tmp26
    tmp29 = tmp27 * tmp28
    tmp30 = 1.0
    tmp31 = tmp30 - tmp24
    tmp32 = tmp29 * tmp31
    tmp33 = tmp25 + tmp32
    tmp35 = tmp33 * tmp34
    tmp37 = tmp35 + tmp36
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.store(out_ptr1 + (r1 + 16 * x0), tmp37, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf7 = reinterpret_tensor(buf5, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf5
        get_raw_stream(0)
        triton_per_fused_native_layer_norm_0[grid(4)](buf7, primals_1, buf4,
            4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0)
        del buf1
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1[
            grid(16)](buf3, primals_1, primals_2, buf4, buf7, primals_3,
            primals_4, buf0, buf8, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_4
    return buf8, primals_1, primals_2, primals_3, buf0, buf3, buf4, buf7


class ILNNew(nn.Module):

    def __init__(self, channels, resl, eps=1e-08):
        super().__init__()
        self.rho = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.rho.data.fill_(0.0)
        self.instance_norm = nn.InstanceNorm2d(channels, eps=eps, affine=False)
        self.layer_norm = nn.LayerNorm((channels, resl, resl), eps=eps,
            elementwise_affine=False)
        self.gamma = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.beta = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.gamma.data.fill_(1.0)
        self.beta.data.fill_(0.0)

    def forward(self, input_0):
        primals_2 = self.rho
        primals_3 = self.gamma
        primals_4 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
repo_name: STomoya/animeface
module_name: ILN
synthetic: false
uuid: 15296
licenses: ["MIT"]
stars: 61
sha: 37b3cd26097d7874559d4c152e41e5712b7a1a42
repo_link: https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
entry_point: ScalePredictor

original_triton_python_code:

import torch
import torch.nn as nn


class ScalePredictor(nn.Module):

    def __init__(self, nz, scale_lr_decay=0.2, scale_bias=1.0):
        super(ScalePredictor, self).__init__()
        self.pred_layer = nn.Linear(nz, 1)
        self.scale_bias = scale_bias
        self.scale_lr_decay = scale_lr_decay

    def forward(self, feat):
        scale = self.scale_lr_decay * self.pred_layer.forward(feat) + self.scale_bias
        return scale


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nz': 4}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = 0.2
    tmp5 = tmp3 * tmp4
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tl.store(in_out_ptr0 + x0, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(64)](buf1, primals_2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_2
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


class ScalePredictorNew(nn.Module):

    def __init__(self, nz, scale_lr_decay=0.2, scale_bias=1.0):
        super(ScalePredictorNew, self).__init__()
        self.pred_layer = nn.Linear(nz, 1)
        self.scale_bias = scale_bias
        self.scale_lr_decay = scale_lr_decay

    def forward(self, input_0):
        primals_1 = self.pred_layer.weight
        primals_2 = self.pred_layer.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
eldar/acsm
ScalePredictor
false
15,297
[ "Apache-2.0" ]
52
04069e8bb4c12185473dc10c3355e5367fa98968
https://github.com/eldar/acsm/tree/04069e8bb4c12185473dc10c3355e5367fa98968
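A minimal parity check for record pairs like the one above, assuming a CUDA device and the records' own get_inputs/get_init_inputs conventions; check_parity and the tolerance are illustrative names, not part of the dataset:

import torch


def check_parity(eager_cls, fused_cls, get_inputs, get_init_inputs, atol=1e-05):
    # Build both variants, copy the eager weights into the fused one,
    # then compare a single forward pass on the record's sample inputs.
    init_args, init_kwargs = get_init_inputs()
    eager = eager_cls(*init_args, **init_kwargs).cuda()
    fused = fused_cls(*init_args, **init_kwargs).cuda()
    fused.load_state_dict(eager.state_dict())
    inputs = [t.cuda() for t in get_inputs()]
    with torch.no_grad():
        ref, out = eager(*inputs), fused(*inputs)
    assert torch.allclose(ref, out, atol=atol), (ref - out).abs().max().item()


# e.g. check_parity(ScalePredictor, ScalePredictorNew, get_inputs, get_init_inputs)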
SpatialAttention2d
import torch import torch.nn as nn import torch._utils class SpatialAttention2d(nn.Module): def __init__(self, channel): super(SpatialAttention2d, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.squeeze(x) z = self.sigmoid(z) return x * z def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](primals_2, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf0 class SpatialAttention2dNew(nn.Module): def __init__(self, channel): super(SpatialAttention2dNew, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.squeeze.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
elmajdma/seismic-deeplearning
SpatialAttention2d
false
15,298
[ "MIT" ]
270
bc084abe153509c40b45f8bf0f80dfda1049d7dc
https://github.com/elmajdma/seismic-deeplearning/tree/bc084abe153509c40b45f8bf0f80dfda1049d7dc
InputMapping
import math import torch import torch.fft import torch.nn class InputMapping(torch.nn.Conv1d): def __init__(self, in_channels: 'int', out_channels: 'int', omega_0: 'float', stride: 'int'=1, bias: 'bool'=True): super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, bias=bias) self.omega_0 = omega_0 self.weight.data.normal_(0.0, 2 * math.pi * self.omega_0) def forward(self, x): out = super().forward(x) out = torch.cat([torch.cos(out), torch.sin(out)], dim=1) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'omega_0': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.fft import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl_math.cos(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp12 = tl.load(in_ptr0 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl_math.sin(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) return buf2, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf1 class InputMappingNew(torch.nn.Conv1d): def __init__(self, in_channels: 'int', out_channels: 'int', omega_0: 'float', stride: 'int'=1, bias: 'bool'=True): super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, bias=bias) self.omega_0 = omega_0 self.weight.data.normal_(0.0, 2 * math.pi * self.omega_0) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dwromero/ckconv
InputMapping
false
15,299
[ "MIT" ]
74
d44c6441a98792477d6259368c210089bb33fe7a
https://github.com/dwromero/ckconv/tree/d44c6441a98792477d6259368c210089bb33fe7a
CMVN
import torch import torch.onnx class CMVN(torch.nn.Module): eps = 1e-05 @torch.no_grad() def forward(self, feat): mean = feat.mean(dim=2, keepdim=True) std = feat.std(dim=2, keepdim=True) feat = (feat - mean) / (std + CMVN.eps) return feat def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tl.store(out_ptr0 + x3, tmp27, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class CMVNNew(torch.nn.Module): eps = 1e-05 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
entn-at/Online-Speech-Recognition
CMVN
false
15,300
[ "Apache-2.0" ]
201
75680cef38c57d0ac60f5e23c90d24bb3046e4e7
https://github.com/entn-at/Online-Speech-Recognition/tree/75680cef38c57d0ac60f5e23c90d24bb3046e4e7
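The fused kernel above folds the mean, the Bessel-corrected std (its 3.0 divisor is n - 1 for the 4-element dim), and the normalization into one pass; a small eager restatement with the record's (4, 4, 4, 4) sample input:

import torch

feat = torch.rand(4, 4, 4, 4)
mean = feat.mean(dim=2, keepdim=True)
std = feat.std(dim=2, keepdim=True)  # unbiased: divides by n - 1 = 3, matching the kernel
out = (feat - mean) / (std + 1e-05)
assert out.shape == feat.shape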
PatchEmbed3D
import torch import torch.utils.data from itertools import chain as chain import torch.nn as nn class PatchEmbed3D(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, temporal_resolution=4, in_chans=3, patch_size=16, z_block_size=2, embed_dim=768, flatten=True): super().__init__() self.height = img_size // patch_size self.width = img_size // patch_size self.frames = temporal_resolution // z_block_size self.num_patches = self.height * self.width * self.frames self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=( z_block_size, patch_size, patch_size), stride=(z_block_size, patch_size, patch_size)) self.flatten = flatten def forward(self, x): _B, _C, _T, _H, _W = x.shape x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) return x def get_inputs(): return [torch.rand([4, 3, 64, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from itertools import chain as chain import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 512 % 768 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (768, 3, 2, 16, 16), (1536, 512, 256, 16, 1)) assert_size_stride(primals_3, (768,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 16, 16), padding=(0, 0, 0), dilation=(1, 1, 1), transposed= False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 768, 32, 4, 4), (393216, 512, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(1572864)](buf1, primals_3, 1572864, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf1, (4, 512, 768), (393216, 1, 512), 0 ), primals_1, primals_2 class PatchEmbed3DNew(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, temporal_resolution=4, in_chans=3, patch_size=16, z_block_size=2, embed_dim=768, flatten=True): super().__init__() self.height = img_size // patch_size self.width = img_size // patch_size self.frames = temporal_resolution // z_block_size self.num_patches = self.height * self.width * self.frames self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=( z_block_size, patch_size, patch_size), stride=(z_block_size, patch_size, patch_size)) self.flatten = flatten def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dylan-campbell/Motionformer
PatchEmbed3D
false
15,301
[ "Apache-2.0" ]
153
6c860614a3b252c6163971ba20e61ea3184d5291
https://github.com/dylan-campbell/Motionformer/tree/6c860614a3b252c6163971ba20e61ea3184d5291
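The patch bookkeeping above is plain integer arithmetic; a small sanity check, first with the module's documented defaults and then with the record's actual (4, 3, 64, 64, 64) sample (the numbers below are derived from the code, not stored in the dataset):

img_size, patch_size = 224, 16
temporal_resolution, z_block_size = 4, 2
height = width = img_size // patch_size        # 14
frames = temporal_resolution // z_block_size   # 2
assert height * width * frames == 392          # num_patches for the defaults
# For the sample input, the conv emits (4, 768, 32, 4, 4), so
# flatten(2).transpose(1, 2) yields (4, 32 * 4 * 4, 768) == (4, 512, 768),
# matching the reinterpret_tensor shape in the optimised call().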
fChannelAttentionGG
import math
import torch
import numpy as np
import torch.optim
import torch.utils.data


class fChannelAttentionGG(torch.nn.Module):

    def __init__(self, N_h_in, N_in, ratio=1, group='SE2'):
        super(fChannelAttentionGG, self).__init__()
        self.N_in = N_in
        self.ratio = ratio
        self.N_h_in = N_h_in
        self.N_h = N_h_in
        self.weight_fc1 = torch.nn.Parameter(torch.rand(self.N_in // ratio,
            self.N_in, self.N_h_in))
        self.weight_fc2 = torch.nn.Parameter(torch.rand(self.N_in, self.
            N_in // ratio, self.N_h_in))
        self.action = self._left_action_of_h_grid_se2
        if group == 'E2':
            # imports needed only for the E2 branch
            import importlib
            import attgconv
            group = importlib.import_module('attgconv.group.' + group)
            e2_layers = attgconv.layers(group)
            n_grid = 8
            self.h_grid = e2_layers.H.grid_global(n_grid)
            self.action = self._left_action_on_grid_e2
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.kaiming_uniform_(self.weight_fc1, a=math.sqrt(5))
        torch.nn.init.kaiming_uniform_(self.weight_fc2, a=math.sqrt(5))

    def forward(self, input):
        fc1, fc2 = self.action()
        input_mean = input.mean(dim=[-2, -1]).unsqueeze(-1)
        input_max = input.max(dim=-2)[0].max(dim=-1)[0].unsqueeze(-1)
        avg_out = self._linear(torch.relu(self._linear(input_mean, fc1)), fc2)
        max_out = self._linear(torch.relu(self._linear(input_max, fc1)), fc2)
        out = torch.sigmoid(avg_out + max_out)
        out = torch.reshape(out, [out.shape[0], self.N_in, self.N_h_in, 1, 1])
        return out

    def _linear(self, input, w):
        in_reshaped = input.unsqueeze(-4).unsqueeze(-5)
        w_reshaped = torch.reshape(w, [1, w.shape[0], w.shape[1], w.shape[2
            ], w.shape[3], 1])
        output = (in_reshaped * w_reshaped).sum(dim=[-3, -2])
        return output

    def _left_action_of_h_grid_se2(self):
        fc1 = torch.stack([self.weight_fc1.roll(shifts=i, dims=-1) for i in
            range(self.N_h)], dim=1)
        fc2 = torch.stack([self.weight_fc2.roll(shifts=i, dims=-1) for i in
            range(self.N_h)], dim=1)
        return fc1, fc2

    def _left_action_on_grid_e2(self):
        fc1 = torch.stack([self._left_action_of_h_grid_e2(h, self.
            weight_fc1) for h in self.h_grid.grid], dim=1)
        fc2 = torch.stack([self._left_action_of_h_grid_e2(h, self.
            weight_fc2) for h in self.h_grid.grid], dim=1)
        return fc1, fc2

    def _left_action_of_h_grid_e2(self, h, fx):
        shape = fx.shape
        Lgfx = fx.clone()
        Lgfx = torch.reshape(Lgfx, [shape[0], shape[1], 2, 4])
        if h[0] != 0:
            Lgfx[:, :, 0, :] = torch.roll(Lgfx[:, :, 0, :], shifts=int(
                torch.round(1.0 / (np.pi / 2.0) * h[0]).item()), dims=-1)
            Lgfx[:, :, 1, :] = torch.roll(Lgfx[:, :, 1, :], shifts=-int(
                torch.round(1.0 / (np.pi / 2.0) * h[0]).item()), dims=-1)
        if h[-1] == -1:
            Lgfx = torch.roll(Lgfx, shifts=1, dims=-2)
        Lgfx = torch.reshape(Lgfx, shape)
        return Lgfx


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'N_h_in': 4, 'N_in': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import numpy as np import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = triton_helpers.maximum(tmp6, tmp13) tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = triton_helpers.maximum(tmp14, tmp21) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = triton_helpers.maximum(tmp25, tmp26) tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp22, tmp29) tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_per_fused_mul_relu_stack_sum_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] 
xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex // 4 x0 = xindex % 4 r2 = rindex % 4 x1 = xindex // 4 r5 = rindex x4 = xindex tmp23 = tl.load(in_ptr1 + r5, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + r5, None, eviction_policy='evict_last') tmp0 = r3 + 4 * x0 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (r2 + 4 * (r3 + 4 * x0) + 16 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 * (-4 + r3 + 4 * x0) + 16 * x1 + (3 + r2) % 4), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * (-8 + r3 + 4 * x0) + 16 * x1 + (2 + r2) % 4), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1, 1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (4 * (-12 + r3 + 4 * x0) + 16 * x1 + (1 + r2) % 4), tmp16 & xmask, other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp24 = tmp23 * tmp22 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp30 = tmp29 * tmp22 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.where(xmask, tmp31, 0) tmp34 = tl.sum(tmp33, 1)[:, None] tmp35 = tl.full([1, 1], 0, tl.int32) tmp36 = triton_helpers.maximum(tmp35, tmp28) tmp37 = triton_helpers.maximum(tmp35, tmp34) tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp36, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x4, tmp37, xmask) @triton.jit def triton_per_fused_add_mul_sigmoid_sigmoid_backward_stack_sum_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex // 4 x0 = xindex % 4 r2 = rindex % 4 x1 = xindex // 4 r5 = rindex x4 = xindex tmp23 = tl.load(in_ptr1 + r5, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + r5, None, eviction_policy='evict_last') tmp0 = r3 + 4 * x0 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (r2 + 4 * (r3 + 4 * x0) + 16 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 * (-4 + r3 + 4 * x0) + 16 * x1 + (3 + r2) % 4), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * (-8 + r3 + 4 * x0) + 16 * x1 + (2 + r2) % 4), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1, 1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (4 * (-12 + r3 + 4 * x0) + 16 * x1 + (1 + r2) % 4), tmp16 & xmask, other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp24 = tmp23 * tmp22 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp30 = tmp29 * tmp22 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.where(xmask, tmp31, 0) tmp34 = tl.sum(tmp33, 1)[:, None] tmp35 = tmp28 + tmp34 
    tmp36 = tl.sigmoid(tmp35)
    tmp37 = 1.0
    tmp38 = tmp37 - tmp36
    tmp39 = tmp36 * tmp38
    tl.store(out_ptr0 + (r5 + 16 * x4), tmp22, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x4, tmp36, xmask)
    tl.store(out_ptr2 + x4, tmp39, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_0[grid(16)](primals_3, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = buf2
        del buf2
        triton_per_fused_mean_1[grid(16)](buf4, primals_3, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del primals_3
        buf5 = empty_strided_cuda((1, 4, 4, 1), (16, 4, 1, 16), torch.float32)
        buf8 = empty_strided_cuda((1, 4, 4, 1), (16, 4, 1, 16), torch.float32)
        buf6 = reinterpret_tensor(buf5, (1, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf5
        buf9 = reinterpret_tensor(buf8, (1, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf8
        triton_per_fused_mul_relu_stack_sum_2[grid(16)](buf6, buf9,
            primals_1, buf4, buf3, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        buf10 = empty_strided_cuda((1, 4, 4, 1), (16, 4, 1, 16), torch.float32)
        buf11 = reinterpret_tensor(buf10, (1, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf10
        buf12 = empty_strided_cuda((1, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_per_fused_add_mul_sigmoid_sigmoid_backward_stack_sum_3[grid(16)
            ](buf11, primals_2, buf6, buf9, buf1, buf12, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        del primals_2
    return reinterpret_tensor(buf11, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0
        ), buf1, reinterpret_tensor(buf4, (1, 1, 4, 4, 1), (16, 16, 4, 1, 1), 0
        ), buf6, reinterpret_tensor(buf3, (1, 1, 4, 4, 1), (16, 16, 4, 1, 1), 0
        ), buf9, buf12


class fChannelAttentionGGNew(torch.nn.Module):

    def __init__(self, N_h_in, N_in, ratio=1, group='SE2'):
        super(fChannelAttentionGGNew, self).__init__()
        self.N_in = N_in
        self.ratio = ratio
        self.N_h_in = N_h_in
        self.N_h = N_h_in
        self.weight_fc1 = torch.nn.Parameter(torch.rand(self.N_in // ratio,
            self.N_in, self.N_h_in))
        self.weight_fc2 = torch.nn.Parameter(torch.rand(self.N_in, self.
            N_in // ratio, self.N_h_in))
        self.action = self._left_action_of_h_grid_se2
        if group == 'E2':
            # imports needed only for the E2 branch
            import importlib
            import attgconv
            group = importlib.import_module('attgconv.group.' + group)
            e2_layers = attgconv.layers(group)
            n_grid = 8
            self.h_grid = e2_layers.H.grid_global(n_grid)
            self.action = self._left_action_on_grid_e2
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.kaiming_uniform_(self.weight_fc1, a=math.sqrt(5))
        torch.nn.init.kaiming_uniform_(self.weight_fc2, a=math.sqrt(5))

    def _linear(self, input, w):
        in_reshaped = input.unsqueeze(-4).unsqueeze(-5)
        w_reshaped = torch.reshape(w, [1, w.shape[0], w.shape[1], w.shape[2
            ], w.shape[3], 1])
        output = (in_reshaped * w_reshaped).sum(dim=[-3, -2])
        return output

    def _left_action_of_h_grid_se2(self):
        fc1 = torch.stack([self.weight_fc1.roll(shifts=i, dims=-1) for i in
            range(self.N_h)], dim=1)
        fc2 = torch.stack([self.weight_fc2.roll(shifts=i, dims=-1) for i in
            range(self.N_h)], dim=1)
        return fc1, fc2

    def _left_action_on_grid_e2(self):
        fc1 = torch.stack([self._left_action_of_h_grid_e2(h, self.
            weight_fc1) for h in self.h_grid.grid], dim=1)
        fc2 = torch.stack([self._left_action_of_h_grid_e2(h, self.
weight_fc2) for h in self.h_grid.grid], dim=1) return fc1, fc2 def _left_action_of_h_grid_e2(self, h, fx): shape = fx.shape Lgfx = fx.clone() Lgfx = torch.reshape(Lgfx, [shape[0], shape[1], 2, 4]) if h[0] != 0: Lgfx[:, :, 0, :] = torch.roll(Lgfx[:, :, 0, :], shifts=int( torch.round(1.0 / (np.pi / 2.0) * h[0]).item()), dims=-1) Lgfx[:, :, 1, :] = torch.roll(Lgfx[:, :, 1, :], shifts=-int( torch.round(1.0 / (np.pi / 2.0) * h[0]).item()), dims=-1) if h[-1] == -1: Lgfx = torch.roll(Lgfx, shifts=1, dims=-2) Lgfx = torch.reshape(Lgfx, shape) return Lgfx def forward(self, input_0): primals_1 = self.weight_fc1 primals_2 = self.weight_fc2 primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dwromero/att_gconvs
fChannelAttentionGG
false
15,302
[ "MIT" ]
53
872259cad49763fdcfa3e96e80b6b5c331adf084
https://github.com/dwromero/att_gconvs/tree/872259cad49763fdcfa3e96e80b6b5c331adf084
DurationMSELoss
import torch
import torch.utils.data
from torch.optim import *
from torch.optim.lr_scheduler import *


class DurationMSELoss(torch.nn.Module):
    """Loss function module for duration predictor.

    The loss value is calculated in log domain to make it Gaussian.
    """

    def __init__(self, offset=1.0, reduction='mean'):
        """Initialize duration predictor loss module.

        Args:
            offset (float, optional): Offset value to avoid nan in log domain.
            reduction (str): Reduction type in loss calculation.
        """
        super().__init__()
        self.criterion = torch.nn.MSELoss(reduction=reduction)
        self.offset = offset

    def forward(self, outputs, targets):
        """Calculate forward propagation.

        Args:
            outputs (Tensor): Batch of prediction durations in log domain (B, T)
            targets (LongTensor): Batch of groundtruth durations in linear domain (B, T)

        Returns:
            Tensor: Mean squared error loss value.

        Note:
            `outputs` is in log domain but `targets` is in linear domain.
        """
        targets = torch.log(targets.float() + self.offset)
        loss = self.criterion(outputs, targets)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch.optim import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_log_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = tl_math.log(tmp3)
    tmp5 = tmp0 - tmp4
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
    tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
    tmp10 = 256.0
    tmp11 = tmp9 / tmp10
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_log_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class DurationMSELossNew(torch.nn.Module):
    """Loss function module for duration predictor.

    The loss value is calculated in log domain to make it Gaussian.
    """

    def __init__(self, offset=1.0, reduction='mean'):
        """Initialize duration predictor loss module.

        Args:
            offset (float, optional): Offset value to avoid nan in log domain.
            reduction (str): Reduction type in loss calculation.
        """
        super().__init__()
        self.criterion = torch.nn.MSELoss(reduction=reduction)
        self.offset = offset

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
entn-at/efficient_tts
DurationMSELoss
false
15,303
[ "MIT" ]
111
5e6ea55d0c9694f7e30eecb5048976088f1a3c66
https://github.com/entn-at/efficient_tts/tree/5e6ea55d0c9694f7e30eecb5048976088f1a3c66
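Since offset defaults to 1.0, the target transform above is just log1p; a small eager identity check (tensor values arbitrary):

import torch
import torch.nn.functional as F

outputs, targets = torch.randn(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = F.mse_loss(outputs, torch.log(targets + 1.0))  # what the module computes
same = F.mse_loss(outputs, torch.log1p(targets))      # numerically the log1p form
assert torch.allclose(loss, same)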
Classifier
import torch import torch.nn.functional as F from torch import nn class Classifier(nn.Module): def __init__(self, dims): """ Single hidden layer classifier with softmax output. """ super(Classifier, self).__init__() [x_dim, h_dim, y_dim] = dims self.dense = nn.Linear(x_dim, h_dim) self.logits = nn.Linear(h_dim, y_dim) def forward(self, x): x = F.relu(self.dense(x)) x = F.softmax(self.logits(x), dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dims': [4, 4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5 class ClassifierNew(nn.Module): def __init__(self, dims): """ Single hidden layer classifier with softmax output. """ super(ClassifierNew, self).__init__() [x_dim, h_dim, y_dim] = dims self.dense = nn.Linear(x_dim, h_dim) self.logits = nn.Linear(h_dim, y_dim) def forward(self, input_0): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_4 = self.logits.weight primals_5 = self.logits.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
engdorm/semi-supervised-pytorch
Classifier
false
15,304
[ "MIT" ]
700
b149e06aa413dd426886149930c8c265fd9cc746
https://github.com/engdorm/semi-supervised-pytorch/tree/b149e06aa413dd426886149930c8c265fd9cc746
Gate
import torch import torch.nn as nn import torch.nn.functional as F class Gate(nn.Module): def __init__(self, hidden_size): super(Gate, self).__init__() self.hidden_size = hidden_size self.wrx = nn.Linear(hidden_size, hidden_size) self.wrh = nn.Linear(hidden_size, hidden_size) self.wix = nn.Linear(hidden_size, hidden_size) self.wih = nn.Linear(hidden_size, hidden_size) self.wnx = nn.Linear(hidden_size, hidden_size) self.wnh = nn.Linear(hidden_size, hidden_size) def forward(self, title, pg): r_gate = F.sigmoid(self.wrx(title) + self.wrh(pg)) i_gate = F.sigmoid(self.wix(title) + self.wih(pg)) n_gate = F.tanh(self.wnx(title) + torch.mul(r_gate, self.wnh(pg))) result = torch.mul(i_gate, pg) + torch.mul(torch.add(-i_gate, 1), n_gate) return result def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_neg_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_out_ptr1 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, xmask) tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr6 + x2, xmask) tmp21 = tl.load(in_ptr7 + x2, xmask) tmp22 = tl.load(in_ptr8 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = -tmp15 tmp19 = 1.0 tmp20 = tmp18 + tmp19 tmp23 = tmp7 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = libdevice.tanh(tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp17 + tmp26 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(in_out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr0 + x2, tmp27, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) del primals_7 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 
4), 0), out=buf4) del primals_9 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_11 del primals_12 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_13 del primals_14 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_neg_sigmoid_tanh_0[grid(256)](buf2, buf5, primals_2, buf1, primals_5, primals_8, buf4, primals_10, primals_6, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf4 del primals_10 del primals_2 del primals_5 del primals_8 return buf8, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, buf6, buf7 class GateNew(nn.Module): def __init__(self, hidden_size): super(GateNew, self).__init__() self.hidden_size = hidden_size self.wrx = nn.Linear(hidden_size, hidden_size) self.wrh = nn.Linear(hidden_size, hidden_size) self.wix = nn.Linear(hidden_size, hidden_size) self.wih = nn.Linear(hidden_size, hidden_size) self.wnx = nn.Linear(hidden_size, hidden_size) self.wnh = nn.Linear(hidden_size, hidden_size) def forward(self, input_0, input_1): primals_1 = self.wrx.weight primals_2 = self.wrx.bias primals_4 = self.wrh.weight primals_5 = self.wrh.bias primals_7 = self.wix.weight primals_8 = self.wix.bias primals_9 = self.wih.weight primals_10 = self.wih.bias primals_11 = self.wnx.weight primals_12 = self.wnx.bias primals_13 = self.wnh.weight primals_14 = self.wnh.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0]
elsehow/Writing-editing-Network
Gate
false
15,305
[ "MIT" ]
79
a8551cd224a4987a6eec3cf566bcf0793ad36dfd
https://github.com/elsehow/Writing-editing-Network/tree/a8551cd224a4987a6eec3cf566bcf0793ad36dfd
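The Gate above follows the GRU update equations, with torch.mul(torch.add(-i_gate, 1), n_gate) spelling out (1 - i) * n; a compact eager sketch (layer and variable names are illustrative):

import torch
import torch.nn as nn

hidden = 4
wrx, wrh, wix, wih, wnx, wnh = (nn.Linear(hidden, hidden) for _ in range(6))
title, pg = torch.rand(2, hidden), torch.rand(2, hidden)
r = torch.sigmoid(wrx(title) + wrh(pg))    # reset gate
i = torch.sigmoid(wix(title) + wih(pg))    # update gate
n = torch.tanh(wnx(title) + r * wnh(pg))   # candidate state
result = i * pg + (1 - i) * n              # interpolate between pg and n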
SquaredModulus
import torch
from torch import nn


class SquaredModulus(nn.Module):
    """Squared modulus layer.

    A PyTorch module that implements a squared modulus operator.
    To implement the squared modulus of C complex-valued channels, the
    expected input dimension is N*1*W*(2*C) where channel roles alternate
    between real and imaginary part.
    The way the squared modulus is computed is real ** 2 + imag ** 2 as follows:
    - squared operator on real and imag
    - average pooling to compute (real ** 2 + imag ** 2) / 2
    - multiply by 2

    Attributes:
        _pool: average-pooling function over the channel dimensions
    """

    def __init__(self):
        super(SquaredModulus, self).__init__()
        self._pool = nn.AvgPool1d(kernel_size=2, stride=2)

    def forward(self, x):
        x = torch.transpose(x, 2, 1)
        output = 2 * self._pool(x ** 2)
        return torch.transpose(output, 2, 1)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_transpose_0(in_ptr0, out_ptr1, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 8
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    yindex % 2
    yindex // 2
    tmp0 = tl.load(in_ptr0 + (x2 + 8 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x2 + 8 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp3 + tmp1
    tmp5 = 0.5
    tmp6 = tmp4 * tmp5
    tmp7 = 2.0
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr1 + (x2 + 4 * y3), tmp8, xmask & ymask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_transpose_0[grid(8, 4)](arg0_1, buf1, 8, 4,
            XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
        del arg0_1
    return buf1,


class SquaredModulusNew(nn.Module):
    """Squared modulus layer.

    A PyTorch module that implements a squared modulus operator.
    To implement the squared modulus of C complex-valued channels, the
    expected input dimension is N*1*W*(2*C) where channel roles alternate
    between real and imaginary part.
    The way the squared modulus is computed is real ** 2 + imag ** 2 as follows:
    - squared operator on real and imag
    - average pooling to compute (real ** 2 + imag ** 2) / 2
    - multiply by 2

    Attributes:
        _pool: average-pooling function over the channel dimensions
    """

    def __init__(self):
        super(SquaredModulusNew, self).__init__()
        self._pool = nn.AvgPool1d(kernel_size=2, stride=2)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
entn-at/leaf-audio-pytorch
SquaredModulus
false
15,306
[ "Apache-2.0" ]
72
33f4ba4c8bdf07f125033f8e706d0d0bc6816445
https://github.com/entn-at/leaf-audio-pytorch/tree/33f4ba4c8bdf07f125033f8e706d0d0bc6816445
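The 2 * AvgPool1d trick above turns an average over an interleaved (real, imag) pair into the sum real ** 2 + imag ** 2; a minimal eager check of that identity:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)                      # dim 1 interleaves real/imag channels
pooled = 2 * F.avg_pool1d(x.transpose(2, 1) ** 2, kernel_size=2, stride=2)
out = pooled.transpose(2, 1)                 # (4, 2, 4)
re, im = x[:, 0::2, :], x[:, 1::2, :]
assert torch.allclose(out, re ** 2 + im ** 2)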
VariantSigmoid
import torch import torch.nn as nn class VariantSigmoid(nn.Module): def __init__(self, alpha): super().__init__() self.alpha = alpha def forward(self, x): y = 1 / (1 + torch.exp(-self.alpha * x)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'alpha': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -4.0 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp4 = 1.0 tmp5 = tmp3 + tmp4 tmp6 = tl.full([1], 1, tl.int32) tmp7 = tmp6 / tmp5 tmp8 = tmp7 * tmp4 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class VariantSigmoidNew(nn.Module): def __init__(self, alpha): super().__init__() self.alpha = alpha def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
entn-at/AGAIN-VC
VariantSigmoid
false
15,307
[ "MIT" ]
78
dbf94bf55882f897c312c7760cd892c51c93c9ab
https://github.com/entn-at/AGAIN-VC/tree/dbf94bf55882f897c312c7760cd892c51c93c9ab
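Two things worth noting about the kernel above: the module's alpha=4 is baked into the compiled constant -4.0 (a different alpha would need a different compiled kernel), and the expression is just a temperature-scaled sigmoid:

import torch

alpha, x = 4.0, torch.randn(4, 4, 4, 4)
y = 1 / (1 + torch.exp(-alpha * x))
assert torch.allclose(y, torch.sigmoid(alpha * x))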
ClassificationTestModel
from torch.nn import Module import torch import torch.nn as nn from typing import Any from torch.nn.modules import Module class ClassificationTestModel(Module): def __init__(self, in_chans: 'int'=3, num_classes: 'int'=1000, **kwargs: Any) ->None: super().__init__() self.conv1 = nn.Conv2d(in_channels=in_chans, out_channels=1, kernel_size=1) self.pool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(1, num_classes) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = self.conv1(x) x = self.pool(x) x = torch.flatten(x, 1) x = self.fc(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn as nn from typing import Any from torch.nn.modules import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_convolution_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 4 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) _tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp3 = tmp0 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = _tmp5 + tmp4 _tmp5 = tl.where(rmask & xmask, tmp6, _tmp5) tmp5 = tl.sum(_tmp5, 1)[:, None] tmp7 = 4096.0 tmp8 = tmp5 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (1000, 1), (1, 1)) assert_size_stride(primals_5, (1000,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_red_fused_convolution_mean_0[grid(4)](buf2, buf0, primals_2, 4, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del buf0 del primals_2 buf3 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (4, 1), (1, 0), 0), reinterpret_tensor(primals_4, (1, 1000), (1, 1), 0), alpha=1, beta=1, out=buf3) del primals_5 return buf3, primals_1, primals_3, reinterpret_tensor(buf2, (4, 1), (1, 1), 0), primals_4 class ClassificationTestModelNew(Module): def __init__(self, in_chans: 'int'=3, num_classes: 'int'=1000, **kwargs: Any) ->None: super().__init__() self.conv1 = nn.Conv2d(in_channels=in_chans, out_channels=1, kernel_size=1) self.pool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(1, num_classes) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ethanwhite/torchgeo
ClassificationTestModel
false
15,308
[ "MIT" ]
678
cb20e1abfd9213f9ee7700df972385db13568642
https://github.com/ethanwhite/torchgeo/tree/cb20e1abfd9213f9ee7700df972385db13568642
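The triton_red_fused_convolution_mean_0 reduction above fuses the conv bias with AdaptiveAvgPool2d((1, 1)); pooling to 1x1 and flattening is just a spatial mean, as this eager check shows:

import torch

x = torch.rand(4, 1, 64, 64)
pooled = torch.nn.AdaptiveAvgPool2d((1, 1))(x).flatten(1)
manual = x.mean(dim=(2, 3))  # what the kernel computes as sum / 4096.0
assert torch.allclose(pooled, manual)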
Upsample
import torch from torch import nn import torch.utils.data class Upsample(nn.Module): def __init__(self, dim): super().__init__() self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(1024)](buf1, primals_2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class UpsampleNew(nn.Module): def __init__(self, dim): super().__init__() self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
entn-at/GradTTS
Upsample
false
15,309
[ "MIT" ]
55
d31cbf41211615a01fffc3812715e3f7f2be214d
https://github.com/entn-at/GradTTS/tree/d31cbf41211615a01fffc3812715e3f7f2be214d
SCse
import torch import torch.nn as nn import torch._utils class SpatialAttention2d(nn.Module): def __init__(self, channel): super(SpatialAttention2d, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.squeeze(x) z = self.sigmoid(z) return x * z class GAB(nn.Module): def __init__(self, input_dim, reduction=4): super(GAB, self).__init__() self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1) self.conv2 = nn.Conv2d(input_dim // reduction, input_dim, kernel_size=1, stride=1) self.relu = nn.ReLU(inplace=True) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.global_avgpool(x) z = self.relu(self.conv1(z)) z = self.sigmoid(self.conv2(z)) return x * z class SCse(nn.Module): def __init__(self, dim): super(SCse, self).__init__() self.satt = SpatialAttention2d(dim) self.catt = GAB(dim) def forward(self, x): return self.satt(x) + self.catt(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x4 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp0 * tmp5 tmp7 = tmp3 + tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf1 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf2, primals_2, 
16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(4)](buf4, primals_4, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(16)](buf6, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_3[grid(256)](primals_2, buf0, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) return (buf7, primals_1, primals_2, primals_3, primals_5, buf0, buf2, buf4, buf6) class SpatialAttention2d(nn.Module): def __init__(self, channel): super(SpatialAttention2d, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.squeeze(x) z = self.sigmoid(z) return x * z class GAB(nn.Module): def __init__(self, input_dim, reduction=4): super(GAB, self).__init__() self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1) self.conv2 = nn.Conv2d(input_dim // reduction, input_dim, kernel_size=1, stride=1) self.relu = nn.ReLU(inplace=True) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.global_avgpool(x) z = self.relu(self.conv1(z)) z = self.sigmoid(self.conv2(z)) return x * z class SCseNew(nn.Module): def __init__(self, dim): super(SCseNew, self).__init__() self.satt = SpatialAttention2d(dim) self.catt = GAB(dim) def forward(self, input_0): primals_1 = self.satt.squeeze.weight primals_3 = self.catt.conv1.weight primals_4 = self.catt.conv1.bias primals_5 = self.catt.conv2.weight primals_6 = self.catt.conv2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
elmajdma/seismic-deeplearning
SCse
false
15310
[ "MIT" ]
270
bc084abe153509c40b45f8bf0f80dfda1049d7dc
https://github.com/elmajdma/seismic-deeplearning/tree/bc084abe153509c40b45f8bf0f80dfda1049d7dc
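A minimal parity sketch for this pair (not part of the dataset row): it assumes both class definitions above are in scope and a CUDA device is available, since call() launches Triton kernels; parameters are copied so the eager and fused paths should agree.

import torch

torch.manual_seed(0)
eager = SCse(4).cuda()
fused = SCseNew(4).cuda()
fused.load_state_dict(eager.state_dict())  # identical conv weights/biases
x = torch.rand(4, 4, 4, 4, device='cuda')  # call() asserts this exact shape
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-5)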
Model
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super(Model, self).__init__() self.linear1 = nn.Linear(28 * 28, 32) self.linear2 = nn.Linear(32, 10) def forward(self, inputs): x = inputs.view(-1, 28 * 28) x = F.relu(self.linear1(x)) x = self.linear2(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (32, 784), (784, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (10, 32), (32, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 32 ), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(128)](buf1, primals_3, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (32, 10), (1, 32), 0), alpha=1, beta=1, out=buf2) del primals_5 buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_1[grid(4)](buf2, buf5, 4, 10, XBLOCK= 1, num_warps=2, num_stages=1) del buf2 return buf5, primals_1, buf1, buf5, primals_4 class ModelNew(nn.Module): def __init__(self): super(ModelNew, self).__init__() self.linear1 = nn.Linear(28 * 28, 32) self.linear2 = nn.Linear(32, 10) def forward(self, input_0): primals_2 = self.linear1.weight primals_3 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
emirojaseng/pytorch-meta-optimizer
Model
false
15311
[ "MIT" ]
298
3641981c990150ceb6c55d25a05ba76388f9ec69
https://github.com/emirojaseng/pytorch-meta-optimizer/tree/3641981c990150ceb6c55d25a05ba76388f9ec69
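A hedged usage sketch for this pair; note that call() hard-asserts the (4, 784) example shape from get_inputs, so the generated ModelNew only accepts that exact batch.

import torch

model = ModelNew().cuda()
x = torch.rand(4, 784, device='cuda')   # call() asserts this exact shape
log_probs = model(x)                    # (4, 10) log-probabilities
assert torch.allclose(log_probs.exp().sum(dim=1),
                      torch.ones(4, device='cuda'))  # rows of exp() sum to 1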
LayerNorm1D
import torch import torch.nn as nn class LayerNorm1D(nn.Module): def __init__(self, num_outputs, eps=1e-05, affine=True): super(LayerNorm1D, self).__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(1, num_outputs)) self.bias = nn.Parameter(torch.zeros(1, num_outputs)) def forward(self, inputs): input_mean = inputs.mean(1, keepdim=True).expand_as(inputs) input_std = inputs.std(1, keepdim=True).expand_as(inputs) x = (inputs - input_mean) / (input_std + self.eps) return x * self.weight.expand_as(x) + self.bias.expand_as(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_outputs': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x3 = xindex // 64 x5 = xindex % 16 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNorm1DNew(nn.Module): def __init__(self, num_outputs, eps=1e-05, affine=True): super(LayerNorm1DNew, self).__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(1, num_outputs)) self.bias = nn.Parameter(torch.zeros(1, num_outputs)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
emirojaseng/pytorch-meta-optimizer
LayerNorm1D
false
15312
[ "MIT" ]
298
3641981c990150ceb6c55d25a05ba76388f9ec69
https://github.com/emirojaseng/pytorch-meta-optimizer/tree/3641981c990150ceb6c55d25a05ba76388f9ec69
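One detail worth calling out: the tmp22 = 3.0 divisor in the fused kernel above reproduces torch.Tensor.std's default Bessel correction (n - 1 for the 4 channel elements), and eps is added to the std rather than the variance, matching the eager module. A quick CUDA parity sketch under those assumptions (no state copy needed, since weight and bias are deterministically initialised to ones and zeros):

import torch

ref = LayerNorm1D(4).cuda()
new = LayerNorm1DNew(4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x), new(x), atol=1e-6)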
QRLoss
from torch.nn import Module import torch from typing import cast from torch.nn.modules import Module class QRLoss(Module): """The QR (forward) loss between class probabilities and predictions. This loss is defined in `'Resolving label uncertainty with implicit generative models' <https://openreview.net/forum?id=AEa_UepnMDX>`_. .. versionadded:: 0.2 """ def forward(self, probs: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: """Computes the QR (forwards) loss on prior. Args: probs: probabilities of predictions, expected shape B x C x H x W. target: prior probabilities, expected shape B x C x H x W. Returns: qr loss """ q = probs q_bar = q.mean(dim=(0, 2, 3)) qbar_log_S = (q_bar * torch.log(q_bar)).sum() q_log_p = torch.einsum('bcxy,bcxy->bxy', q, torch.log(target)).mean() loss = qbar_log_S - q_log_p return cast(torch.Tensor, loss) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn.modules import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_per_fused_log_mean_mul_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 64.0 tmp2 = tmp0 / tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl_math.log(tmp0) tl.store(out_ptr0 + (x2 + 4 * y3), tmp1, xmask & ymask) @triton.jit def triton_per_fused_mean_sub_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_out_ptr0 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp6 = 64.0 tmp7 = tmp3 / tmp6 tmp8 = tmp5 - tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((), (), torch.float32) triton_per_fused_log_mean_mul_sum_1[grid(1)](buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64, 4)](arg0_1, buf2, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64, 4)](arg1_1, buf3, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg1_1 buf4 = empty_strided_cuda((64, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (64, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4) del buf2 del buf3 buf6 = buf1 del buf1 triton_per_fused_mean_sub_4[grid(1)](buf6, buf4, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf4 return buf6, class QRLossNew(Module): """The QR (forward) loss between class probabilities and predictions. This loss is defined in `'Resolving label uncertainty with implicit generative models' <https://openreview.net/forum?id=AEa_UepnMDX>`_. .. versionadded:: 0.2 """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ethanwhite/torchgeo
QRLoss
false
15313
[ "MIT" ]
678
cb20e1abfd9213f9ee7700df972385db13568642
https://github.com/ethanwhite/torchgeo/tree/cb20e1abfd9213f9ee7700df972385db13568642
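A LaTeX transcription of what the traced graph computes (writing q for probs, p for target, and B, H, W for the batch and spatial sizes); the first term comes from triton_per_fused_log_mean_mul_sum_1 and the second from the bmm over the cloned and logged tensors:

\bar{q}_c = \frac{1}{BHW} \sum_{b,x,y} q_{bcxy}, \qquad
\mathcal{L}_{\mathrm{QR}} = \sum_{c} \bar{q}_c \log \bar{q}_c \;-\; \frac{1}{BHW} \sum_{b,x,y} \sum_{c} q_{bcxy} \log p_{bcxy}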
SelfAttn
import torch from torch import nn from torch.nn import functional as F class SelfAttn(nn.Module): """ self-attention with learnable parameters """ def __init__(self, dhid): super().__init__() self.scorer = nn.Linear(dhid, 1) def forward(self, inp): scores = F.softmax(self.scorer(inp), dim=1) cont = scores.transpose(1, 2).bmm(inp).squeeze(1) return cont def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dhid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), primals_3, out=buf4) del buf3 return reinterpret_tensor(buf4, (4, 4), (4, 1), 0), primals_3, buf1 class SelfAttnNew(nn.Module): """ self-attention with learnable parameters """ def __init__(self, dhid): super().__init__() self.scorer = nn.Linear(dhid, 1) def forward(self, input_0): primals_1 = self.scorer.weight primals_2 = self.scorer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
etaoxing/crl_alfred
SelfAttn
false
15314
[ "MIT" ]
148
cad500cf84f71e47f1191e7810dde0c74d295f08
https://github.com/etaoxing/crl_alfred/tree/cad500cf84f71e47f1191e7810dde0c74d295f08
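A usage sketch, assuming CUDA and the exact (B, T, D) = (4, 4, 4) shape asserted by call(); the module pools a sequence into one context vector per batch element by softmax-weighting timesteps along dim 1.

import torch

attn = SelfAttnNew(dhid=4).cuda()
inp = torch.rand(4, 4, 4, device='cuda')  # (B, T, D)
cont = attn(inp)                          # (B, D) softmax-weighted sum over T
assert cont.shape == (4, 4)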
RQLoss
from torch.nn import Module
import torch
from typing import cast
from torch.nn.modules import Module
import torch.nn.functional as F


class RQLoss(Module):
    """The RQ (backwards) loss between class probabilities and predictions.

    This loss is defined in `'Resolving label uncertainty with implicit
    generative models' <https://openreview.net/forum?id=AEa_UepnMDX>`_.

    .. versionadded:: 0.2
    """

    def forward(self, probs: 'torch.Tensor', target: 'torch.Tensor') -> torch.Tensor:
        """Computes the RQ (backwards) loss on the prior.

        Args:
            probs: probabilities of predictions, expected shape B x C x H x W
            target: prior probabilities, expected shape B x C x H x W

        Returns:
            rq loss
        """
        q = probs
        z = q / q.norm(p=1, dim=(0, 2, 3), keepdim=True).clamp_min(1e-12).expand_as(q)
        r = F.normalize(z * target, p=1, dim=1)
        loss = torch.einsum('bcxy,bcxy->bxy', r, torch.log(r) - torch.log(q)).mean()
        return cast(torch.Tensor, loss)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn.modules import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 16 r2 = rindex // 16 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + 1) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr1 + 2) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp23 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp28 = tl.load(in_ptr1 + 3) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp32 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tmp7 = tmp5 * tmp6 tmp8 = tl_math.abs(tmp7) tmp12 = triton_helpers.maximum(tmp11, tmp3) tmp13 = tmp9 / tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl_math.abs(tmp15) tmp17 = tmp8 + tmp16 tmp21 = triton_helpers.maximum(tmp20, tmp3) tmp22 = tmp18 / tmp21 tmp24 = tmp22 * tmp23 tmp25 = tl_math.abs(tmp24) tmp26 = tmp17 + tmp25 tmp30 = triton_helpers.maximum(tmp29, tmp3) tmp31 = tmp27 / tmp30 tmp33 = tmp31 * tmp32 tmp34 = tl_math.abs(tmp33) tmp35 = tmp26 + tmp34 tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x2 + 16 * y3), xmask & ymask) tmp7 = tl.load(in_ptr3 + (x2 + 16 * y1), xmask & ymask, eviction_policy ='evict_last') tmp2 = 1e-12 tmp3 = 
triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp6 = tmp4 * tmp5 tmp8 = triton_helpers.maximum(tmp7, tmp2) tmp9 = tmp6 / tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tl_math.log(tmp0) tmp12 = tmp10 - tmp11 tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp9, xmask & ymask) tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp12, xmask & ymask) @triton.jit def triton_per_fused_mean_3(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 64.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_linalg_vector_norm_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused_div_linalg_vector_norm_mul_1[grid(64)](arg0_1, buf0, arg1_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 16)](arg0_1, buf0, arg1_1, buf1, buf2, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1 ) del arg0_1 del arg1_1 del buf0 buf4 = reinterpret_tensor(buf1, (64, 1, 1), (1, 1, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (64, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4) del buf2 del buf3 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 triton_per_fused_mean_3[grid(1)](buf6, buf4, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf4 return buf6, class RQLossNew(Module): """The RQ (backwards) loss between class probabilities and predictions. This loss is defined in `'Resolving label uncertainty with implicit generative models' <https://openreview.net/forum?id=AEa_UepnMDX>`_. .. versionadded:: 0.2 """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ethanwhite/torchgeo
RQLoss
false
15315
[ "MIT" ]
678
cb20e1abfd9213f9ee7700df972385db13568642
https://github.com/ethanwhite/torchgeo/tree/cb20e1abfd9213f9ee7700df972385db13568642
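In the same notation as the QR loss above, the RQ loss traced here is (absolute values dropped since probabilities are non-negative; both L1 norms are clamped below at 10^{-12}, matching clamp_min and F.normalize's eps):

z_{bcxy} = \frac{q_{bcxy}}{\max\big(\sum_{b',x',y'} q_{b'cx'y'},\ 10^{-12}\big)}, \qquad
r_{bcxy} = \frac{z_{bcxy}\, p_{bcxy}}{\sum_{c'} z_{bc'xy}\, p_{bc'xy}}, \qquad
\mathcal{L}_{\mathrm{RQ}} = \frac{1}{BHW} \sum_{b,x,y} \sum_{c} r_{bcxy} \left( \log r_{bcxy} - \log q_{bcxy} \right)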
SegmentationTestModel
from torch.nn import Module import torch import torch.nn as nn from typing import Any from typing import cast from torch.nn.modules import Module class SegmentationTestModel(Module): def __init__(self, in_channels: 'int'=3, classes: 'int'=1000, **kwargs: Any ) ->None: super().__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= classes, kernel_size=1, padding=0) def forward(self, x: 'torch.Tensor') ->torch.Tensor: return cast(torch.Tensor, self.conv1(x)) def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn as nn from typing import Any from torch.nn.modules import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4000 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 1000 y1 = yindex // 1000 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 1000 * x2 + 4096000 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1000, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_2, (1000,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(12, 4096)](primals_3, buf0, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1000, 64, 64), (4096000, 1, 64000, 1000)) buf2 = empty_strided_cuda((4, 1000, 64, 64), (4096000, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_1[grid(4000, 4096)](buf1, primals_2, buf2, 4000, 4096, XBLOCK=16, YBLOCK=256, num_warps=8, num_stages=1) del buf1 del primals_2 return buf2, primals_1, buf0 class SegmentationTestModelNew(Module): def __init__(self, in_channels: 'int'=3, classes: 'int'=1000, **kwargs: Any ) ->None: super().__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= classes, kernel_size=1, padding=0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ethanwhite/torchgeo
SegmentationTestModel
false
15316
[ "MIT" ]
678
cb20e1abfd9213f9ee7700df972385db13568642
https://github.com/ethanwhite/torchgeo/tree/cb20e1abfd9213f9ee7700df972385db13568642
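A shape sketch; the interesting part of the generated code is layout rather than math: triton_poi_fused_0 shuffles the input to channels-last for the 1x1 convolution, and triton_poi_fused_convolution_1 folds the bias in while restoring the contiguous layout.

import torch

m = SegmentationTestModelNew().cuda()               # in_channels=3, classes=1000
logits = m(torch.rand(4, 3, 64, 64, device='cuda'))  # per-pixel classifier
assert logits.shape == (4, 1000, 64, 64)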
InvConvNear
import torch from torch import nn from torch.nn import functional as F import torch.utils.data class InvConvNear(nn.Module): def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs): super().__init__() assert n_split % 2 == 0 self.channels = channels self.n_split = n_split self.no_jacobian = no_jacobian w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split). normal_())[0] if torch.det(w_init) < 0: w_init[:, 0] = -1 * w_init[:, 0] self.weight = nn.Parameter(w_init) def forward(self, x, x_mask=None, reverse=False, **kwargs): b, c, t = x.size() assert c % self.n_split == 0 if x_mask is None: x_mask = 1 x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t else: x_len = torch.sum(x_mask, [1, 2]) x = x.view(b, 2, c // self.n_split, self.n_split // 2, t) x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t) if reverse: if hasattr(self, 'weight_inv'): weight = self.weight_inv else: weight = torch.inverse(self.weight.float()) logdet = None else: weight = self.weight if self.no_jacobian: logdet = 0 else: logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len weight = weight.view(self.n_split, self.n_split, 1, 1) z = F.conv2d(x, weight) z = z.view(b, 2, self.n_split // 2, c // self.n_split, t) z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask return z, logdet def store_inverse(self): self.weight_inv = torch.inverse(self.weight.float()) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = -1.0 tmp3 = tmp1 == tmp2 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp3, None) @triton.jit def triton_poi_fused_mul_scalar_tensor_where_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0).to(tl.int1) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = float('nan') tmp5 = tl.where(tmp1, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = 4.0 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (1, 4)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._linalg_slogdet.default(primals_2) buf1 = buf0[0] buf2 = buf0[1] buf3 = buf0[2] buf4 = buf0[3] del buf0 buf5 = empty_strided_cuda((), (), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(1)](buf1, buf5, 1, XBLOCK=1, num_warps=1, num_stages=1) del buf1 buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_scalar_tensor_where_1[grid(4)](buf5, buf2, buf6, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf2 buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_convolution_2[grid(4, 4)](primals_2, buf7, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf8 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4, 4, 1), 0), buf7, stride=(1, 1), padding=(0, 0 ), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 1, 4), (16, 4, 4, 1)) del buf7 buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0) del buf8 triton_poi_fused_mul_3[grid(64)](buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf9, buf6, reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4, 8, 1), 0), 
buf3, buf4, buf5, reinterpret_tensor(primals_2, (4, 4, 1, 1), (1, 4, 4, 4), 0) class InvConvNearNew(nn.Module): def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs): super().__init__() assert n_split % 2 == 0 self.channels = channels self.n_split = n_split self.no_jacobian = no_jacobian w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split). normal_())[0] if torch.det(w_init) < 0: w_init[:, 0] = -1 * w_init[:, 0] self.weight = nn.Parameter(w_init) def store_inverse(self): self.weight_inv = torch.inverse(self.weight.float()) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
entn-at/GradTTS
InvConvNear
false
15317
[ "MIT" ]
55
d31cbf41211615a01fffc3812715e3f7f2be214d
https://github.com/entn-at/GradTTS/tree/d31cbf41211615a01fffc3812715e3f7f2be214d
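A round-trip sketch on the eager module (CPU is fine here); InvConvNearNew only covers the forward/logdet path, while reverse=True still goes through the original class and store_inverse:

import torch

flow = InvConvNear(channels=4)
x = torch.rand(4, 4, 4)
z, logdet = flow(x)            # logdet has shape (b,) = (4,)
flow.store_inverse()           # cache W^-1 for the reverse pass
x_rec, _ = flow(z, reverse=True)
assert torch.allclose(x, x_rec, atol=1e-4)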
GaborConstraint
import math import torch from torch import nn class GaborConstraint(nn.Module): """Constraint mu and sigma, in radians. Mu is constrained in [0,pi], sigma s.t full-width at half-maximum of the gaussian response is in [1,pi/2]. The full-width at half maximum of the Gaussian response is 2*sqrt(2*log(2))/sigma . See Section 2.2 of https://arxiv.org/pdf/1711.01161.pdf for more details. """ def __init__(self, kernel_size): """Initialize kernel size. Args: kernel_size: the length of the filter, in samples. """ super(GaborConstraint, self).__init__() self._kernel_size = kernel_size def forward(self, kernel): mu_lower = 0.0 mu_upper = math.pi sigma_lower = 4 * math.sqrt(2 * math.log(2)) / math.pi sigma_upper = self._kernel_size * math.sqrt(2 * math.log(2)) / math.pi clipped_mu = torch.clamp(kernel[:, 0], mu_lower, mu_upper) clipped_sigma = torch.clamp(kernel[:, 1], sigma_lower, sigma_upper) return torch.stack([clipped_mu, clipped_sigma], dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 3.141592653589793 tmp9 = triton_helpers.minimum(tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp12 & xmask, other=0.0) tmp16 = 1.4991250010342207 tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp18 = triton_helpers.minimum(tmp17, tmp16) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 2, 4, 4), (32, 16, 4, 1), 0), class GaborConstraintNew(nn.Module): """Constraint mu and sigma, in radians. Mu is constrained in [0,pi], sigma s.t full-width at half-maximum of the gaussian response is in [1,pi/2]. The full-width at half maximum of the Gaussian response is 2*sqrt(2*log(2))/sigma . See Section 2.2 of https://arxiv.org/pdf/1711.01161.pdf for more details. """ def __init__(self, kernel_size): """Initialize kernel size. Args: kernel_size: the length of the filter, in samples. """ super(GaborConstraintNew, self).__init__() self._kernel_size = kernel_size def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
entn-at/leaf-audio-pytorch
GaborConstraint
false
15318
[ "Apache-2.0" ]
72
33f4ba4c8bdf07f125033f8e706d0d0bc6816445
https://github.com/entn-at/leaf-audio-pytorch/tree/33f4ba4c8bdf07f125033f8e706d0d0bc6816445
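With kernel_size=4 the two sigma bounds coincide, which is why the fused kernel can fold both clamps into the single constant 1.4991250010342207 (a maximum followed by a minimum against the same value). A quick check:

import math

sigma_lower = 4 * math.sqrt(2 * math.log(2)) / math.pi
sigma_upper = 4 * math.sqrt(2 * math.log(2)) / math.pi  # kernel_size == 4
assert sigma_lower == sigma_upper
assert abs(sigma_lower - 1.4991250010342207) < 1e-12  # constant baked into the kernel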
CausalConv1d
import torch import torch.nn as nn class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1): super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=kernel_size) def forward(self, x): x = self.conv(x) return x[:, :, :-self.kernel_size] def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(4,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 9), (36, 9, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(144)](buf1, primals_2, 144, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 5), (36, 9, 1), 0 ), primals_1, primals_3 class CausalConv1dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1): super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=kernel_size) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ex4sperans/freesound-classification
CausalConv1d
false
15319
[ "Apache-2.0" ]
55
71b9920ce0ae376aa7f1a3a2943f0f92f4820813
https://github.com/ex4sperans/freesound-classification/tree/71b9920ce0ae376aa7f1a3a2943f0f92f4820813
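The length bookkeeping is worth spelling out: padding=kernel_size on both sides yields T + k + 1 convolution outputs (9 for T=4, k=4, matching buf1 above), and trimming the last k leaves T + 1 samples, each depending only on strictly earlier inputs. A CPU sketch on the eager module:

import torch

conv = CausalConv1d(4, 4, kernel_size=4)
x = torch.rand(4, 4, 4)
assert conv(x).shape == (4, 4, 5)   # T + 1, not T

x_future = x.clone()
x_future[..., 2:] = 0.0             # perturb inputs at t >= 2
# outputs at t < 3 only see inputs at t' < t, so they are unchanged
assert torch.allclose(conv(x)[..., :3], conv(x_future)[..., :3])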
Conv1dLinear
import torch import torch.utils.data from torch.optim import * from torch.optim.lr_scheduler import * class Conv1dLinear(torch.nn.Module): """Conv1D + Linear for Transformer block. A variant of MultiLayeredConv1d, which replaces second conv-layer to linear. """ def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate): """Initialize Conv1dLinear module. Args: in_chans (int): Number of input channels. hidden_chans (int): Number of hidden channels. kernel_size (int): Kernel size of conv1d. dropout_rate (float): Dropout rate. """ super(Conv1dLinear, self).__init__() self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.w_2 = torch.nn.Linear(hidden_chans, in_chans) self.dropout = torch.nn.Dropout(dropout_rate) def forward(self, x): """Calculate forward propagation. Args: x (Tensor): Batch of input tensors (B, ..., in_chans). Returns: Tensor: Batch of output tensors (B, ..., hidden_chans). """ x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1) return self.w_2(self.dropout(x)) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from torch.optim import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 12 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 3 y1 = yindex // 3 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), 
groups=1, bias=None) assert_size_stride(buf1, (4, 4, 3), (12, 3, 1)) del buf0 buf2 = empty_strided_cuda((4, 3, 4), (12, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(12, 4)](buf1, primals_3, buf2, 12, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((12, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (12, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 3, 4), (12, 4, 1), 0) del buf3 triton_poi_fused_add_2[grid(48)](buf4, primals_5, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(48)](buf1, primals_3, buf5, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del primals_3 return buf4, primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (12, 4), (4, 1), 0), primals_4, buf5 class Conv1dLinearNew(torch.nn.Module): """Conv1D + Linear for Transformer block. A variant of MultiLayeredConv1d, which replaces second conv-layer to linear. """ def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate): """Initialize Conv1dLinear module. Args: in_chans (int): Number of input channels. hidden_chans (int): Number of hidden channels. kernel_size (int): Kernel size of conv1d. dropout_rate (float): Dropout rate. """ super(Conv1dLinearNew, self).__init__() self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.w_2 = torch.nn.Linear(hidden_chans, in_chans) self.dropout = torch.nn.Dropout(dropout_rate) def forward(self, input_0): primals_1 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
entn-at/efficient_tts
Conv1dLinear
false
15320
[ "MIT" ]
111
5e6ea55d0c9694f7e30eecb5048976088f1a3c66
https://github.com/entn-at/efficient_tts/tree/5e6ea55d0c9694f7e30eecb5048976088f1a3c66
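Two things to note here: with an even kernel size the "same" padding (k - 1) // 2 shortens the sequence by one (T = 4 -> 3 in buf1 above), and the output's last dimension is in_chans, since w_2 projects back from hidden_chans (the forward docstring's "(B, ..., hidden_chans)" is an upstream slip). A CPU sketch on the eager module, with eval() to disable dropout:

import torch

ff = Conv1dLinear(4, 4, kernel_size=4, dropout_rate=0.5).eval()
out = ff(torch.rand(4, 4, 4))   # (B, T, in_chans)
assert out.shape == (4, 3, 4)   # even kernel drops one timestep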
BahdanauAttention
import math import torch import torch.nn as nn import torch.nn.functional as F from random import * class BahdanauAttention(nn.Module): def __init__(self, hidden_size): super().__init__() self.hidden_size = hidden_size self.w1 = nn.Linear(hidden_size, hidden_size) self.w2 = nn.Linear(hidden_size, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size * 2)) stdv = 1.0 / math.sqrt(hidden_size) self.v.data.normal_(mean=0, std=stdv) def forward(self, hidden, encoder_outputs): src_len = encoder_outputs.shape[0] hidden = hidden.expand(src_len, -1, -1) hidden_energy = self.w1(hidden) encoder_outputs_energy = self.w2(encoder_outputs) energy = torch.cat((hidden_energy, encoder_outputs_energy), dim=2) v = self.v * energy v = torch.sum(v, dim=2) attention_energies = F.softmax(v, dim=0) return attention_energies def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn from random import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_cat_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp11 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + r1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x0 + (-4 + r1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp12 = tmp11 * tmp10 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tl.store(out_ptr0 + (r1 + 8 * x0), tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (8,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_cat_mul_sum_0[grid(16)](buf0, buf1, primals_7, buf2, buf3, 16, 8, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 return buf5, primals_7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf2, buf5 class BahdanauAttentionNew(nn.Module): def __init__(self, hidden_size): super().__init__() self.hidden_size = hidden_size self.w1 = nn.Linear(hidden_size, hidden_size) self.w2 = nn.Linear(hidden_size, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size * 2)) stdv = 1.0 / math.sqrt(hidden_size) self.v.data.normal_(mean=0, std=stdv) def forward(self, input_0, input_1): primals_7 = self.v primals_3 = self.w1.weight primals_4 = self.w1.bias primals_5 = self.w2.weight primals_6 = self.w2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
evinaybit/100-Days-of-NLP
BahdanauAttention
false
15,321
[ "MIT" ]
239
81e08884dd31b7b99bef27f43a179cda09ab5732
https://github.com/evinaybit/100-Days-of-NLP/tree/81e08884dd31b7b99bef27f43a179cda09ab5732
Attention
import torch
import torch.nn as nn
from random import *


class Attention(nn.Module):

    def __init__(self, hidden_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.w1 = nn.Linear(hidden_size, hidden_size)
        self.w2 = nn.Linear(hidden_size, hidden_size)
        self.v = nn.Linear(hidden_size, 1, bias=False)

    def forward(self, encoder_outputs, hidden, mask=None):
        encoder_energy = self.w1(encoder_outputs)
        decoder_energy = self.w2(hidden.squeeze(1))
        decoder_energy = decoder_energy.unsqueeze(1)
        combined = torch.tanh(encoder_energy + decoder_energy)
        energy = self.v(combined)
        energy = energy.squeeze(-1)
        if mask is not None:
            energy = energy.masked_fill(mask, -10000000000.0)
        attention = torch.softmax(energy, dim=-1)
        return attention


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
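# A minimal, hypothetical usage sketch of the Attention module above (not part of the
# original record), driven by the record's own get_inputs()/get_init_inputs() helpers;
# runs on CPU.
if __name__ == '__main__':
    _, init_kwargs = get_init_inputs()
    attn = Attention(**init_kwargs)
    encoder_outputs, hidden = get_inputs()
    weights = attn(encoder_outputs, hidden)
    print(weights.shape)  # softmax-normalised attention over the last dimension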
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from random import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 4 x3 = xindex // 256 x5 = xindex % 64 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_2, buf1, primals_6, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_6 buf3 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0) del buf1 extern_kernels.mm(reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), buf2, buf5, primals_7 class AttentionNew(nn.Module): def __init__(self, hidden_size): super().__init__() self.hidden_size = hidden_size self.w1 = nn.Linear(hidden_size, hidden_size) self.w2 = nn.Linear(hidden_size, hidden_size) self.v = nn.Linear(hidden_size, 1, bias=False) def forward(self, input_0, input_1): primals_1 = self.w1.weight primals_2 = self.w1.bias primals_5 = self.w2.weight primals_6 = self.w2.bias primals_7 = self.v.weight primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
evinaybit/100-Days-of-NLP
Attention
false
15,322
[ "MIT" ]
239
81e08884dd31b7b99bef27f43a179cda09ab5732
https://github.com/evinaybit/100-Days-of-NLP/tree/81e08884dd31b7b99bef27f43a179cda09ab5732
ChannelAttentionGG
import math
import torch
import torch.optim
import torch.utils.data


class ChannelAttention(torch.nn.Module):

    def __init__(self, N_out, N_in, ratio=1):
        super(ChannelAttention, self).__init__()
        self.linear = torch.nn.functional.linear
        self.avg_pool = torch.nn.AdaptiveAvgPool2d(1)
        self.max_pool = torch.nn.AdaptiveMaxPool2d(1)
        self.N_in = N_in
        self.N_out = N_out
        self.weight_fc1 = torch.nn.Parameter(torch.Tensor(self.N_out,
            self.N_in // ratio, self.N_in))
        self.weight_fc2 = torch.nn.Parameter(torch.Tensor(self.N_out,
            self.N_in, self.N_in // ratio))
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.kaiming_uniform_(self.weight_fc1, a=math.sqrt(5))
        torch.nn.init.kaiming_uniform_(self.weight_fc2, a=math.sqrt(5))

    def forward(self, input):
        input_mean = input.mean(dim=[-2, -1]).unsqueeze(-1)
        input_max = input.max(dim=-2)[0].max(dim=-1)[0].unsqueeze(-1)
        avg_out = self._linear(torch.relu(self._linear(input_mean,
            self.weight_fc1)), self.weight_fc2)
        max_out = self._linear(torch.relu(self._linear(input_max,
            self.weight_fc1)), self.weight_fc2)
        out = torch.sigmoid(avg_out + max_out)
        out = torch.reshape(out, [input.shape[0], self.N_out,
            input.shape[2], self.N_in, 1, 1])
        return out

    def _linear(self, input, w):
        in_reshaped = input.unsqueeze(-3)
        w_reshaped = w.reshape(1, w.shape[0], 1, w.shape[1], w.shape[2], 1)
        output = (in_reshaped * w_reshaped).sum(-2)
        return output


class ChannelAttentionGG(ChannelAttention):

    def __init__(self, N_h, N_out, N_h_in, N_in, ratio=1, bias=False):
        super(ChannelAttentionGG, self).__init__(N_out, N_in, ratio=ratio)
        self.N_h_in = N_h_in
        self.N_h = N_h
        self.weight_fc1 = torch.nn.Parameter(torch.rand(self.N_out,
            self.N_in // ratio, self.N_in, self.N_h_in))
        self.weight_fc2 = torch.nn.Parameter(torch.rand(self.N_out,
            self.N_in, self.N_in // ratio, self.N_h_in))
        self.reset_parameters()

    def forward(self, input):
        fc1, fc2 = self._left_action_of_h_grid()
        input_mean = input.mean(dim=[-2, -1]).unsqueeze(-1)
        input_max = input.max(dim=-2)[0].max(dim=-1)[0].unsqueeze(-1)
        avg_out = self._linear(torch.relu(self._linear(input_mean, fc1)), fc2)
        max_out = self._linear(torch.relu(self._linear(input_max, fc1)), fc2)
        out = torch.sigmoid(avg_out + max_out)
        out = torch.reshape(out, [input.shape[0], self.N_out, self.N_h, -1,
            self.N_h_in, 1, 1])
        return out

    def _linear(self, input, w):
        in_reshaped = input.unsqueeze(-4)
        w_reshaped = torch.reshape(w, [1, w.shape[0], w.shape[1],
            w.shape[2], w.shape[3], w.shape[4], 1])
        output = (in_reshaped * w_reshaped).sum(-3)
        return output

    def _left_action_of_h_grid(self):
        fc1 = torch.stack([self.weight_fc1.roll(shifts=i, dims=-1) for i in
            range(self.N_h)], dim=1)
        fc2 = torch.stack([self.weight_fc2.roll(shifts=i, dims=-1) for i in
            range(self.N_h)], dim=1)
        return fc1, fc2


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'N_h': 4, 'N_out': 4, 'N_h_in': 4, 'N_in': 4}]
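# A minimal, hypothetical usage sketch of ChannelAttentionGG above (not part of the
# original record), using the record's toy sizes; the result is the sigmoid
# channel-attention mask reshaped for broadcasting.
if __name__ == '__main__':
    _, init_kwargs = get_init_inputs()
    layer = ChannelAttentionGG(**init_kwargs)
    mask = layer(*get_inputs())
    print(mask.shape)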
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 16 x3 = xindex // 256 x4 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 * x1 + 16 * (-4 + x2) + 64 * x3 + (3 + x0) % 4), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + 16 * (-8 + x2) + 64 * x3 + (2 + x0) % 4), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (4 * x1 + 16 * (-12 + x2) + 64 * x3 + (1 + x0 ) % 4), tmp16 & xmask, other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x5, tmp22, xmask) @triton.jit def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp24 = 
tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = triton_helpers.maximum(tmp6, tmp13) tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = triton_helpers.maximum(tmp14, tmp21) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = triton_helpers.maximum(tmp25, tmp26) tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp22, tmp29) tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.full([1], 0, tl.int32) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = tmp17 * tmp1 tmp20 = tmp19 * tmp4 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp8 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp12 tmp27 = tmp24 + tmp26 tmp28 = triton_helpers.maximum(tmp15, tmp27) tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_sum_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x4 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x4), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x4), xmask) tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x4), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x4), xmask) tmp15 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr2 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr2 + (8 + x0 + 
16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp16 = tmp15 * tmp1 tmp18 = tmp17 * tmp4 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp8 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp12 tmp25 = tmp22 + tmp24 tmp26 = tmp14 + tmp25 tmp27 = tl.sigmoid(tmp26) tmp28 = 1.0 tmp29 = tmp28 - tmp27 tmp30 = tmp27 * tmp29 tl.store(in_out_ptr0 + x3, tmp27, xmask) tl.store(out_ptr0 + x3, tmp30, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_stack_0[grid(1024)](primals_1, buf0, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) triton_poi_fused_stack_0[grid(1024)](primals_2, buf1, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = buf2 del buf2 triton_per_fused_mean_1[grid(16)](buf4, primals_3, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_max_2[grid(16)](primals_3, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf5 = empty_strided_cuda((1, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 1 ), torch.float32) buf6 = empty_strided_cuda((1, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 1 ), torch.float32) triton_poi_fused_mul_relu_sum_3[grid(256)](buf4, buf0, buf3, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 buf7 = empty_strided_cuda((1, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 256), torch.float32) buf8 = reinterpret_tensor(buf7, (1, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 1), 0) del buf7 buf9 = empty_strided_cuda((1, 4, 4, 4, 4, 1), (256, 64, 16, 4, 1, 1 ), torch.float32) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_sum_4[grid(256)](buf8 , buf5, buf1, buf6, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(buf8, (4, 4, 4, 1, 4, 1, 1), (64, 16, 4, 4, 1, 1, 1), 0), buf1, reinterpret_tensor(buf4, (1, 4, 4, 1), (16, 4, 1, 1), 0), buf5, reinterpret_tensor(buf3, (1, 4, 4, 1), (16, 4, 1, 1), 0 ), buf6, buf9 class ChannelAttention(torch.nn.Module): def __init__(self, N_out, N_in, ratio=1): super(ChannelAttention, self).__init__() self.linear = torch.nn.functional.linear self.avg_pool = torch.nn.AdaptiveAvgPool2d(1) self.max_pool = torch.nn.AdaptiveMaxPool2d(1) self.N_in = N_in self.N_out = N_out self.weight_fc1 = torch.nn.Parameter(torch.Tensor(self.N_out, self. N_in // ratio, self.N_in)) self.weight_fc2 = torch.nn.Parameter(torch.Tensor(self.N_out, self. N_in, self.N_in // ratio)) self.reset_parameters() def reset_parameters(self): torch.nn.init.kaiming_uniform_(self.weight_fc1, a=math.sqrt(5)) torch.nn.init.kaiming_uniform_(self.weight_fc2, a=math.sqrt(5)) def forward(self, input): input_mean = input.mean(dim=[-2, -1]).unsqueeze(-1) input_max = input.max(dim=-2)[0].max(dim=-1)[0].unsqueeze(-1) avg_out = self._linear(torch.relu(self._linear(input_mean, self. weight_fc1)), self.weight_fc2) max_out = self._linear(torch.relu(self._linear(input_max, self. 
weight_fc1)), self.weight_fc2) out = torch.sigmoid(avg_out + max_out) out = torch.reshape(out, [input.shape[0], self.N_out, input.shape[2 ], self.N_in, 1, 1]) return out def _linear(self, input, w): in_reshaped = input.unsqueeze(-3) w_reshaped = w.reshape(1, w.shape[0], 1, w.shape[1], w.shape[2], 1) output = (in_reshaped * w_reshaped).sum(-2) return output class ChannelAttentionGGNew(ChannelAttention): def __init__(self, N_h, N_out, N_h_in, N_in, ratio=1, bias=False): super(ChannelAttentionGGNew, self).__init__(N_out, N_in, ratio=ratio) self.N_h_in = N_h_in self.N_h = N_h self.weight_fc1 = torch.nn.Parameter(torch.rand(self.N_out, self. N_in // ratio, self.N_in, self.N_h_in)) self.weight_fc2 = torch.nn.Parameter(torch.rand(self.N_out, self. N_in, self.N_in // ratio, self.N_h_in)) self.reset_parameters() def _linear(self, input, w): in_reshaped = input.unsqueeze(-4) w_reshaped = torch.reshape(w, [1, w.shape[0], w.shape[1], w.shape[2 ], w.shape[3], w.shape[4], 1]) output = (in_reshaped * w_reshaped).sum(-3) return output def _left_action_of_h_grid(self): fc1 = torch.stack([self.weight_fc1.roll(shifts=i, dims=-1) for i in range(self.N_h)], dim=1) fc2 = torch.stack([self.weight_fc2.roll(shifts=i, dims=-1) for i in range(self.N_h)], dim=1) return fc1, fc2 def forward(self, input_0): primals_1 = self.weight_fc1 primals_2 = self.weight_fc2 primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dwromero/att_gconvs
ChannelAttentionGG
false
15,323
[ "MIT" ]
53
872259cad49763fdcfa3e96e80b6b5c331adf084
https://github.com/dwromero/att_gconvs/tree/872259cad49763fdcfa3e96e80b6b5c331adf084
DepthL1Loss
import torch
import torch.nn as nn


class DepthL1Loss(nn.Module):

    def __init__(self, eps=1e-05):
        super(DepthL1Loss, self).__init__()
        self.eps = eps

    def forward(self, pred, gt):
        bs = pred.size()[0]
        img1 = torch.zeros_like(pred)
        img2 = torch.zeros_like(gt)
        img1 = img1.copy_(pred)
        img2 = img2.copy_(gt)
        mask = gt > self.eps
        img1[~mask] = 0.0
        img2[~mask] = 0.0
        loss = nn.L1Loss(reduction='sum')(img1, img2)
        loss = loss / mask.float().sum() * bs
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
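# A minimal, hypothetical usage sketch of DepthL1Loss above (not part of the original
# record): pixels with gt <= eps are zeroed on both sides, and the summed L1 is
# normalised by the valid-pixel count and scaled by the batch size.
if __name__ == '__main__':
    loss_fn = DepthL1Loss()
    pred, gt = get_inputs()
    print(loss_fn(pred, gt))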
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused__to_copy_abs_div_gt_index_put_lift_fresh_mul_sub_sum_0(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp4 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1e-05
    tmp2 = tmp0 > tmp1
    tmp3 = tmp2 == 0
    tmp5 = 0.0
    tmp6 = tl.where(tmp3, tmp5, tmp4)
    tmp7 = tl.where(tmp3, tmp5, tmp0)
    tmp8 = tmp6 - tmp7
    tmp9 = tl_math.abs(tmp8)
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = tmp2.to(tl.float32)
    tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
    tmp17 = tmp12 / tmp16
    tmp18 = 4.0
    tmp19 = tmp17 * tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf2
        del buf2
        get_raw_stream(0)
        triton_per_fused__to_copy_abs_div_gt_index_put_lift_fresh_mul_sub_sum_0[
            grid(1)](buf4, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf4,


class DepthL1LossNew(nn.Module):

    def __init__(self, eps=1e-05):
        super(DepthL1LossNew, self).__init__()
        self.eps = eps

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
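# A hypothetical smoke test for the compiled wrapper above (not part of the record):
# call() pins CUDA device 0 and asserts the exact contiguous (4, 4, 4, 4) layout, so
# the wrapper only accepts GPU tensors of that shape.
if __name__ == '__main__':
    if torch.cuda.is_available():
        loss_fn = DepthL1LossNew()
        pred = torch.rand([4, 4, 4, 4], device='cuda')
        gt = torch.rand([4, 4, 4, 4], device='cuda')
        print(loss_fn(pred, gt))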
ezxzeng/FFB6D
DepthL1Loss
false
15,324
[ "MIT" ]
145
fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
https://github.com/ezxzeng/FFB6D/tree/fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
C3D
import torch
from torch import nn


def get_10x_lr_params(model):
    """
    This generator returns all the parameters for the fc layer of the net.
    """
    b = [model.linear]
    for j in range(len(b)):
        for k in b[j].parameters():
            if k.requires_grad:
                yield k


def get_1x_lr_params(model):
    """
    This generator returns all the parameters for the conv layer of the net.
    """
    b = [model.res2plus1d]
    for i in range(len(b)):
        for k in b[i].parameters():
            if k.requires_grad:
                yield k


class C3D(nn.Module):
    """
    The C3D network.
    """

    def __init__(self, num_classes, pretrained=False):
        self._prepare_base_model()
        super(C3D, self).__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
            padding=(0, 1, 1))
        self.fc6 = nn.Linear(8192, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.fc8 = nn.Linear(4096, num_classes)
        self.dropout = nn.Dropout(p=0.5)
        self.relu = nn.ReLU()
        self.__init_weight()
        if pretrained:
            self.__load_pretrained_weights()

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x = self.pool1(x)
        x = self.relu(self.conv2(x))
        x = self.pool2(x)
        x = self.relu(self.conv3a(x))
        x = self.relu(self.conv3b(x))
        x = self.pool3(x)
        x = self.relu(self.conv4a(x))
        x = self.relu(self.conv4b(x))
        x = self.pool4(x)
        x = self.relu(self.conv5a(x))
        x = self.relu(self.conv5b(x))
        x = self.pool5(x)
        x = x.view(-1, 8192)
        x = self.relu(self.fc6(x))
        x = self.dropout(x)
        x = self.relu(self.fc7(x))
        x = self.dropout(x)
        logits = self.fc8(x)
        return logits

    def __load_pretrained_weights(self):
        """Initialize network."""
        corresp_name = {'features.0.weight': 'conv1.weight',
            'features.0.bias': 'conv1.bias', 'features.3.weight':
            'conv2.weight', 'features.3.bias': 'conv2.bias',
            'features.6.weight': 'conv3a.weight', 'features.6.bias':
            'conv3a.bias', 'features.8.weight': 'conv3b.weight',
            'features.8.bias': 'conv3b.bias', 'features.11.weight':
            'conv4a.weight', 'features.11.bias': 'conv4a.bias',
            'features.13.weight': 'conv4b.weight', 'features.13.bias':
            'conv4b.bias', 'features.16.weight': 'conv5a.weight',
            'features.16.bias': 'conv5a.bias', 'features.18.weight':
            'conv5b.weight', 'features.18.bias': 'conv5b.bias',
            'classifier.0.weight': 'fc6.weight', 'classifier.0.bias':
            'fc6.bias', 'classifier.3.weight': 'fc7.weight',
            'classifier.3.bias': 'fc7.bias'}
        p_dict = torch.load(pretrained_path)
        s_dict = self.state_dict()
        for name in p_dict:
            if name not in corresp_name:
                continue
            s_dict[corresp_name[name]] = p_dict[name]
        self.load_state_dict(s_dict)

    def __init_weight(self):
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def get_optim_policies(self, lr):
        return [{'params': get_1x_lr_params(self), 'lr': lr}, {'params':
            get_10x_lr_params(self), 'lr': lr * 10}]

    def _prepare_base_model(self):
        self.crop_size = 112
        self.scale_size = 256
        self.input_mean = [0.43216, 0.394666, 0.37645]
        self.input_std = [0.22803, 0.22145, 0.216989]


def get_inputs():
    return [torch.rand([4, 3, 64, 64, 64])]


def get_init_inputs():
    return [[], {'num_classes': 4}]
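# A minimal, hypothetical smoke test for C3D above (not part of the original record).
# A forward pass on the [4, 3, 64, 64, 64] input is expensive on CPU, so this only
# instantiates the network and reports its parameter count; note that
# get_optim_policies() would raise here, since get_1x_lr_params/get_10x_lr_params
# expect model.res2plus1d/model.linear attributes that C3D never defines.
if __name__ == '__main__':
    model = C3D(num_classes=4)
    print(sum(p.numel() for p in model.parameters()))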
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 262144 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 65536 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 8192 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 128 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 8192 x0 = xindex % 8192 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 9, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 8192 * x1), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 12, tl.int64) tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) 
tl.store(out_ptr0 + x2, tmp12, None) @triton.jit def triton_poi_fused_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_13, (512,), (1,)) assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (4096, 8192), (8192, 1)) assert_size_stride(primals_19, (4096,), (1,)) assert_size_stride(primals_20, (4096, 4096), (4096, 1)) assert_size_stride(primals_21, (4096,), (1,)) assert_size_stride(primals_22, (4, 4096), (4096, 1)) assert_size_stride(primals_23, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64, 64), (16777216, 262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(67108864)](buf1, primals_2, 67108864, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2, 2], [1, 2, 2]) buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 64, 32, 32), (8388608, 65536, 1024, 32, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_1[grid(33554432)](buf6, primals_5, 
33554432, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2, 2], [2, 2, 2]) buf8 = buf7[0] buf9 = buf7[1] del buf7 buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1 ), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 32, 16, 16), (2097152, 8192, 256, 16, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_2[grid(8388608)](buf11, primals_7, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 32, 16, 16), (2097152, 8192, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_2[grid(8388608)](buf13, primals_9, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2, 2], [2, 2, 2]) buf15 = buf14[0] buf16 = buf14[1] del buf14 buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 512, 16, 8, 8), (524288, 1024, 64, 8, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_3[grid(2097152)](buf18, primals_11, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 512, 16, 8, 8), (524288, 1024, 64, 8, 1)) buf20 = buf19 del buf19 triton_poi_fused_convolution_relu_3[grid(2097152)](buf20, primals_13, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2, 2], [2, 2, 2]) buf22 = buf21[0] buf23 = buf21[1] del buf21 buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 8, 4, 4), (65536, 128, 16, 4, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_4[grid(262144)](buf25, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 512, 8, 4, 4), (65536, 128, 16, 4, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_4[grid(262144)](buf27, primals_17, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2, 2], [2, 2, 2], [0, 1, 1]) buf29 = buf28[0] buf30 = buf28[1] del buf28 buf31 = empty_strided_cuda((12, 8192), (8192, 1), torch.float32) triton_poi_fused_5[grid(98304)](buf29, buf31, 98304, XBLOCK=512, num_warps=8, num_stages=1) buf32 = empty_strided_cuda((12, 4096), (4096, 1), torch.float32) extern_kernels.mm(buf31, reinterpret_tensor(primals_18, (8192, 4096 ), (1, 8192), 0), out=buf32) del buf31 buf33 = empty_strided_cuda((9, 4096), (4096, 1), torch.float32) triton_poi_fused_relu_6[grid(36864)](buf32, primals_19, buf33, 36864, XBLOCK=512, num_warps=4, num_stages=1) del buf32 del primals_19 buf34 = 
empty_strided_cuda((9, 4096), (4096, 1), torch.float32) extern_kernels.mm(buf33, reinterpret_tensor(primals_20, (4096, 4096 ), (1, 4096), 0), out=buf34) buf35 = buf34 del buf34 triton_poi_fused_relu_7[grid(36864)](buf35, primals_21, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_21 buf36 = empty_strided_cuda((9, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_23, buf35, reinterpret_tensor( primals_22, (4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf36) del primals_23 return (buf36, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf4, buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22, buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (9, 8192), ( 8192, 1), 0), buf33, buf35, primals_22, primals_20, primals_18) def get_10x_lr_params(model): """ This generator returns all the parameters for the fc layer of the net. """ b = [model.linear] for j in range(len(b)): for k in b[j].parameters(): if k.requires_grad: yield k def get_1x_lr_params(model): """ This generator returns all the parameters for the conv layer of the net. """ b = [model.res2plus1d] for i in range(len(b)): for k in b[i].parameters(): if k.requires_grad: yield k class C3DNew(nn.Module): """ The C3D network. """ def __init__(self, num_classes, pretrained=False): self._prepare_base_model() super(C3DNew, self).__init__() self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)) self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1)) self.fc6 = nn.Linear(8192, 4096) self.fc7 = nn.Linear(4096, 4096) self.fc8 = nn.Linear(4096, num_classes) self.dropout = nn.Dropout(p=0.5) self.relu = nn.ReLU() self.__init_weight() if pretrained: self.__load_pretrained_weights() def __load_pretrained_weights(self): """Initialiaze network.""" corresp_name = {'features.0.weight': 'conv1.weight', 'features.0.bias': 'conv1.bias', 'features.3.weight': 'conv2.weight', 'features.3.bias': 'conv2.bias', 'features.6.weight': 'conv3a.weight', 'features.6.bias': 'conv3a.bias', 'features.8.weight': 'conv3b.weight', 'features.8.bias': 'conv3b.bias', 'features.11.weight': 'conv4a.weight', 'features.11.bias': 'conv4a.bias', 'features.13.weight': 'conv4b.weight', 'features.13.bias': 'conv4b.bias', 'features.16.weight': 'conv5a.weight', 'features.16.bias': 'conv5a.bias', 'features.18.weight': 'conv5b.weight', 'features.18.bias': 'conv5b.bias', 'classifier.0.weight': 'fc6.weight', 'classifier.0.bias': 'fc6.bias', 'classifier.3.weight': 'fc7.weight', 'classifier.3.bias': 'fc7.bias'} p_dict = torch.load(pretrained_path) s_dict = self.state_dict() for name in p_dict: if name not in corresp_name: continue s_dict[corresp_name[name]] = 
p_dict[name] self.load_state_dict(s_dict) def __init_weight(self): for m in self.modules(): if isinstance(m, nn.Conv3d): torch.nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm3d): m.weight.data.fill_(1) m.bias.data.zero_() def get_optim_policies(self, lr): return [{'params': get_1x_lr_params(self), 'lr': lr}, {'params': get_10x_lr_params(self), 'lr': lr * 10}] def _prepare_base_model(self): self.crop_size = 112 self.scale_size = 256 self.input_mean = [0.43216, 0.394666, 0.37645] self.input_std = [0.22803, 0.22145, 0.216989] def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3a.weight primals_7 = self.conv3a.bias primals_8 = self.conv3b.weight primals_9 = self.conv3b.bias primals_10 = self.conv4a.weight primals_11 = self.conv4a.bias primals_12 = self.conv4b.weight primals_13 = self.conv4b.bias primals_14 = self.conv5a.weight primals_15 = self.conv5a.bias primals_16 = self.conv5b.weight primals_17 = self.conv5b.bias primals_18 = self.fc6.weight primals_19 = self.fc6.bias primals_20 = self.fc7.weight primals_21 = self.fc7.bias primals_22 = self.fc8.weight primals_23 = self.fc8.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0]
datamllab/autovideo
C3D
false
15,325
[ "MIT" ]
233
34a702fe9d3114e7128dcff12cb43369e4932919
https://github.com/datamllab/autovideo/tree/34a702fe9d3114e7128dcff12cb43369e4932919
OFLoss
import torch
from torch.nn.modules.loss import _Loss


def of_l1_loss(pred_ofsts, kp_targ_ofst, labels, sigma=1.0, normalize=True,
        reduce=False):
    """
    :param pred_ofsts: [bs, n_kpts, n_pts, c]
    :param kp_targ_ofst: [bs, n_pts, n_kpts, c]
    :param labels: [bs, n_pts, 1]
    """
    w = (labels > 1e-08).float()
    bs, n_kpts, n_pts, c = pred_ofsts.size()
    sigma ** 3
    w = w.view(bs, 1, n_pts, 1).repeat(1, n_kpts, 1, 1).contiguous()
    kp_targ_ofst = kp_targ_ofst.view(bs, n_pts, n_kpts, c)
    kp_targ_ofst = kp_targ_ofst.permute(0, 2, 1, 3).contiguous()
    diff = pred_ofsts - kp_targ_ofst
    abs_diff = torch.abs(diff)
    abs_diff = w * abs_diff
    in_loss = abs_diff
    if normalize:
        in_loss = torch.sum(in_loss.view(bs, n_kpts, -1), 2) / (torch.sum(
            w.view(bs, n_kpts, -1), 2) + 0.001)
    if reduce:
        in_loss = torch.mean(in_loss)
    return in_loss


class OFLoss(_Loss):

    def __init__(self):
        super(OFLoss, self).__init__(True)

    def forward(self, pred_ofsts, kp_targ_ofst, labels, normalize=True,
            reduce=False):
        l1_loss = of_l1_loss(pred_ofsts, kp_targ_ofst, labels, sigma=1.0,
            normalize=True, reduce=False)
        return l1_loss


def get_inputs():
    return [torch.rand([4, 1, 4, 1]), torch.rand([4, 1, 4, 1]), torch.rand(
        [4, 1, 4, 1])]


def get_init_inputs():
    return [[], {}]
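# A minimal, hypothetical usage sketch of OFLoss above (not part of the original
# record). With the toy shapes from get_inputs() (bs=4, n_kpts=1, n_pts=4, c=1) the
# result is a [bs, n_kpts] tensor of label-weighted, count-normalised L1 offset errors.
if __name__ == '__main__':
    loss_fn = OFLoss()
    pred_ofsts, kp_targ_ofst, labels = get_inputs()
    print(loss_fn(pred_ofsts, kp_targ_ofst, labels))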
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _Loss

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_div_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 1e-08
    tmp2 = tmp0 > tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.abs(tmp6)
    tmp8 = tmp3 * tmp7
    tmp10 = tmp9 > tmp1
    tmp11 = tmp10.to(tl.float32)
    tmp14 = tmp12 - tmp13
    tmp15 = tl_math.abs(tmp14)
    tmp16 = tmp11 * tmp15
    tmp17 = tmp8 + tmp16
    tmp19 = tmp18 > tmp1
    tmp20 = tmp19.to(tl.float32)
    tmp23 = tmp21 - tmp22
    tmp24 = tl_math.abs(tmp23)
    tmp25 = tmp20 * tmp24
    tmp26 = tmp17 + tmp25
    tmp28 = tmp27 > tmp1
    tmp29 = tmp28.to(tl.float32)
    tmp32 = tmp30 - tmp31
    tmp33 = tl_math.abs(tmp32)
    tmp34 = tmp29 * tmp33
    tmp35 = tmp26 + tmp34
    tmp36 = tmp3 + tmp11
    tmp37 = tmp36 + tmp20
    tmp38 = tmp37 + tmp29
    tmp39 = 0.001
    tmp40 = tmp38 + tmp39
    tmp41 = tmp35 / tmp40
    tl.store(in_out_ptr0 + x0, tmp41, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 1, 4, 1), (4, 4, 1, 1))
    assert_size_stride(arg1_1, (4, 1, 4, 1), (4, 4, 1, 1))
    assert_size_stride(arg2_1, (4, 1, 4, 1), (4, 4, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_div_sum_0[grid(4)](buf1, arg1_1, arg0_1,
            arg2_1, 4, XBLOCK=4, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf1,


def of_l1_loss(pred_ofsts, kp_targ_ofst, labels, sigma=1.0, normalize=True,
        reduce=False):
    """
    :param pred_ofsts: [bs, n_kpts, n_pts, c]
    :param kp_targ_ofst: [bs, n_pts, n_kpts, c]
    :param labels: [bs, n_pts, 1]
    """
    w = (labels > 1e-08).float()
    bs, n_kpts, n_pts, c = pred_ofsts.size()
    sigma ** 3
    w = w.view(bs, 1, n_pts, 1).repeat(1, n_kpts, 1, 1).contiguous()
    kp_targ_ofst = kp_targ_ofst.view(bs, n_pts, n_kpts, c)
    kp_targ_ofst = kp_targ_ofst.permute(0, 2, 1, 3).contiguous()
    diff = pred_ofsts - kp_targ_ofst
    abs_diff = torch.abs(diff)
    abs_diff = w * abs_diff
    in_loss = abs_diff
    if normalize:
        in_loss = torch.sum(in_loss.view(bs, n_kpts, -1), 2) / (torch.sum(
            w.view(bs, n_kpts, -1), 2) + 0.001)
    if reduce:
        in_loss = torch.mean(in_loss)
    return in_loss


class OFLossNew(_Loss):

    def __init__(self):
        super(OFLossNew, self).__init__(True)

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
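# A hypothetical smoke test for the compiled wrapper above (not part of the record):
# OFLossNew is CUDA-only, and call() asserts (4, 1, 4, 1) contiguous inputs.
if __name__ == '__main__':
    if torch.cuda.is_available():
        loss_fn = OFLossNew()
        args = [torch.rand([4, 1, 4, 1], device='cuda') for _ in range(3)]
        print(loss_fn(*args))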
ezxzeng/FFB6D
OFLoss
false
15,326
[ "MIT" ]
145
fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
https://github.com/ezxzeng/FFB6D/tree/fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
MinibatchStd
import torch
import torch.nn as nn
import torch.utils.tensorboard


class MinibatchStd(nn.Module):
    """
    Adds the average std of each data point over a
    slice of the minibatch to that slice as a new
    feature map. This gives an output with one extra
    channel.
    Arguments:
        group_size (int): Number of entries in each slice
            of the batch. If <= 0, the entire batch is used.
            Default value is 4.
        eps (float): Epsilon value added for numerical stability.
            Default value is 1e-8.
    """

    def __init__(self, group_size=4, eps=1e-08, *args, **kwargs):
        super(MinibatchStd, self).__init__()
        if group_size is None or group_size <= 0:
            group_size = 0
        assert group_size != 1, 'Can not use 1 as minibatch std group size.'
        self.group_size = group_size
        self.eps = eps

    def forward(self, input, **kwargs):
        """
        Add a new feature map to the input containing the average
        standard deviation for each slice.
        Arguments:
            input (torch.Tensor)
        Returns:
            output (torch.Tensor)
        """
        group_size = self.group_size or input.size(0)
        assert input.size(0) >= group_size, (
            'Can not use a smaller batch size ' +
            '({}) than the specified '.format(input.size(0)) +
            'group size ({}) '.format(group_size) +
            'of this minibatch std layer.')
        assert input.size(0) % group_size == 0, (
            'Can not use a batch of a size ' +
            '({}) that is not '.format(input.size(0)) +
            'evenly divisible by the group size ({})'.format(group_size))
        x = input
        y = input.view(group_size, -1, *input.size()[1:])
        y = y.float()
        y -= y.mean(dim=0, keepdim=True)
        y = torch.mean(y ** 2, dim=0)
        y = torch.sqrt(y + self.eps)
        y = torch.mean(y.view(y.size(0), -1), dim=-1)
        y = y.view(-1, *([1] * (input.dim() - 1)))
        y = y
        y = y.repeat(group_size, *([1] * (y.dim() - 1)))
        y = y.expand(y.size(0), 1, *x.size()[2:])
        x = torch.cat([x, y], dim=1)
        return x

    def extra_repr(self):
        return 'group_size={}'.format(self.group_size or '-1')


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
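# A minimal, hypothetical usage sketch of MinibatchStd above (not part of the original
# record): with a batch of 4 and group_size=4 the layer appends one std feature map,
# so [4, 4, 4, 4] becomes [4, 5, 4, 4].
if __name__ == '__main__':
    layer = MinibatchStd(group_size=4)
    out = layer(torch.rand([4, 4, 4, 4]))
    print(layer, out.shape)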
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.tensorboard

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_mean_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    x1 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)
    tl.store(out_ptr1 + (x0 + 80 * x1), tmp10, xmask)
    tl.store(out_ptr2 + x2, tmp10, xmask)


@triton.jit
def triton_per_fused_cat_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
        tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    r1 = rindex % 16
    r2 = rindex // 16
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr0 + (64 + r0), None)
    tmp5 = tl.load(in_ptr0 + (128 + r0), None)
    tmp8 = tl.load(in_ptr0 + (192 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = 4.0
    tmp12 = tmp10 / tmp11
    tmp13 = 1e-08
    tmp14 = tmp12 + tmp13
    tmp15 = libdevice.sqrt(tmp14)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.sum(tmp16, 1)[:, None]
    tmp19 = 64.0
    tmp20 = tmp18 / tmp19
    tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]),
        tmp20, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1),
            torch.float32)
        buf4 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
        buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (80, 16, 4, 1), 0)
        get_raw_stream(0)
        triton_poi_fused_cat_mean_sub_0[grid(256)](arg0_1, buf0, buf2,
            arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf3 = reinterpret_tensor(buf4, (4, 1, 4, 4), (80, 16, 4, 1), 64)
        triton_per_fused_cat_mean_1[grid(1)](buf0, buf3, 1, 64, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf0
    return buf4,


class MinibatchStdNew(nn.Module):
    """
    Adds the average std of each data point over a
    slice of the minibatch to that slice as a new
    feature map. This gives an output with one extra
    channel.
    Arguments:
        group_size (int): Number of entries in each slice
            of the batch. If <= 0, the entire batch is used.
            Default value is 4.
        eps (float): Epsilon value added for numerical stability.
            Default value is 1e-8.
    """

    def __init__(self, group_size=4, eps=1e-08, *args, **kwargs):
        super(MinibatchStdNew, self).__init__()
        if group_size is None or group_size <= 0:
            group_size = 0
        assert group_size != 1, 'Can not use 1 as minibatch std group size.'
        self.group_size = group_size
        self.eps = eps

    def extra_repr(self):
        return 'group_size={}'.format(self.group_size or '-1')

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
andoleg/stylegan2_pytorch
MinibatchStd
false
15,327
[ "MIT" ]
121
27a367d00d35742cf66587f1bd1b1263469a8101
https://github.com/andoleg/stylegan2_pytorch/tree/27a367d00d35742cf66587f1bd1b1263469a8101
CosLoss
import torch
from torch.nn.modules.loss import _Loss


class CosLoss(_Loss):

    def __init__(self, eps=1e-05):
        super(CosLoss, self).__init__(True)
        self.eps = eps

    def forward(self, pred_ofsts, kp_targ_ofst, labels, normalize=True):
        """
        :param pred_ofsts: [bs, n_kpts, n_pts, c]
        :param kp_targ_ofst: [bs, n_pts, n_kpts, c]
        :param labels: [bs, n_pts, 1]
        """
        w = (labels > 1e-08).float()
        bs, n_kpts, n_pts, _c = pred_ofsts.size()
        pred_vec = pred_ofsts / (torch.norm(pred_ofsts, dim=3, keepdim=True) +
            self.eps)
        w = w.view(bs, 1, n_pts, 1).repeat(1, n_kpts, 1, 1).contiguous()
        kp_targ_ofst = kp_targ_ofst.view(bs, n_pts, n_kpts, 3)
        kp_targ_ofst = kp_targ_ofst.permute(0, 2, 1, 3).contiguous()
        targ_vec = kp_targ_ofst / (torch.norm(kp_targ_ofst, dim=3,
            keepdim=True) + self.eps)
        cos_sim = pred_vec * targ_vec
        in_loss = -1.0 * w * cos_sim
        if normalize:
            in_loss = torch.sum(in_loss.view(bs, n_kpts, -1), 2) / (torch.
                sum(w.view(bs, n_kpts, -1), 2) + 0.001)
        return in_loss


def get_inputs():
    return [torch.rand([4, 1, 4, 1]), torch.rand([4, 4, 1, 3]), torch.rand(
        [4, 1, 4, 1])]


def get_init_inputs():
    return [[], {}]
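# A minimal, hypothetical usage sketch of CosLoss above (not part of the original
# record): the negated cosine similarity between unit-normalised predicted and target
# offsets, averaged over labelled points and returned per keypoint as [bs, n_kpts].
if __name__ == '__main__':
    loss_fn = CosLoss()
    pred_ofsts, kp_targ_ofst, labels = get_inputs()
    print(loss_fn(pred_ofsts, kp_targ_ofst, labels))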
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn.modules.loss import _Loss

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_div_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
        xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    rnumel = 12
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4 * x0 + r1 // 3), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (4 * x0 + r1 // 3), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp12 = tl.load(in_ptr2 + (r1 + 12 * x0), rmask & xmask, other=0.0)
    tmp13 = tl.load(in_ptr2 + (3 * (r1 // 3) + 12 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + (1 + 3 * (r1 // 3) + 12 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp18 = tl.load(in_ptr2 + (2 + 3 * (r1 // 3) + 12 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp30 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp41 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 1e-08
    tmp2 = tmp0 > tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp4 = -1.0
    tmp5 = tmp3 * tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = libdevice.sqrt(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tmp6 / tmp10
    tmp14 = tmp13 * tmp13
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = libdevice.sqrt(tmp20)
    tmp22 = tmp21 + tmp9
    tmp23 = tmp12 / tmp22
    tmp24 = tmp11 * tmp23
    tmp25 = tmp5 * tmp24
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp28 = tl.where(rmask & xmask, tmp26, 0)
    tmp29 = tl.sum(tmp28, 1)[:, None]
    tmp31 = tmp30 > tmp1
    tmp32 = tmp31.to(tl.float32)
    tmp34 = tmp33 > tmp1
    tmp35 = tmp34.to(tl.float32)
    tmp36 = tmp32 + tmp35
    tmp38 = tmp37 > tmp1
    tmp39 = tmp38.to(tl.float32)
    tmp40 = tmp36 + tmp39
    tmp42 = tmp41 > tmp1
    tmp43 = tmp42.to(tl.float32)
    tmp44 = tmp40 + tmp43
    tmp45 = 0.001
    tmp46 = tmp44 + tmp45
    tmp47 = tmp29 / tmp46
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp47, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 1, 4, 1), (4, 4, 1, 1))
    assert_size_stride(arg1_1, (4, 1, 4, 1), (4, 4, 1, 1))
    assert_size_stride(arg2_1, (4, 4, 1, 3), (12, 3, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_sum_0[grid(4)](buf1, arg0_1, arg1_1,
            arg2_1, 4, 12, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf1,


class CosLossNew(_Loss):

    def __init__(self, eps=1e-05):
        super(CosLossNew, self).__init__(True)
        self.eps = eps

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg2_1 = input_1
        arg1_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
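# A hypothetical smoke test for the compiled wrapper above (not part of the record):
# CosLossNew is CUDA-only; its forward maps input_1 to the targets and input_2 to the
# labels, matching the (pred_ofsts, kp_targ_ofst, labels) order of the eager module.
if __name__ == '__main__':
    if torch.cuda.is_available():
        loss_fn = CosLossNew()
        pred = torch.rand([4, 1, 4, 1], device='cuda')
        targ = torch.rand([4, 4, 1, 3], device='cuda')
        labels = torch.rand([4, 1, 4, 1], device='cuda')
        print(loss_fn(pred, targ, labels))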
ezxzeng/FFB6D
CosLoss
false
15,328
[ "MIT" ]
145
fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
https://github.com/ezxzeng/FFB6D/tree/fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
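For reference, a minimal eager-mode usage sketch for the CosLoss record above (hypothetical, not part of the source repo; it reuses the shapes from the record's get_inputs() and runs on CPU):

import torch

# pred_ofsts [bs=4, n_kpts=1, n_pts=4, c=1], kp_targ_ofst [4, 4, 1, 3], labels [4, 1, 4, 1]
loss_fn = CosLoss()
pred_ofsts = torch.rand(4, 1, 4, 1)
kp_targ_ofst = torch.rand(4, 4, 1, 3)
labels = torch.rand(4, 1, 4, 1)
loss = loss_fn(pred_ofsts, kp_targ_ofst, labels)
print(loss.shape)  # torch.Size([4, 1]): one normalized negative-cosine term per (batch, keypoint)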
TestPointLSTM
import torch import torch.nn as nn class PointLSTMCell(nn.Module): def __init__(self, pts_num, in_channels, hidden_dim, offset_dim, bias): super(PointLSTMCell, self).__init__() self.bias = bias self.pts_num = pts_num self.in_channels = in_channels self.hidden_dim = hidden_dim self.offset_dim = offset_dim self.pool = nn.Sequential(nn.AdaptiveMaxPool2d((None, 1))) self.conv = nn.Conv2d(in_channels=self.in_channels + self. offset_dim + self.hidden_dim, out_channels=4 * self.hidden_dim, kernel_size=(1, 1), bias=self.bias) def forward(self, input_tensor, hidden_state, cell_state): hidden_state[:, :4] -= input_tensor[:, :4] combined = torch.cat([input_tensor, hidden_state], dim=1) combined_conv = self.conv(combined) cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) i = torch.sigmoid(cc_i) f = torch.sigmoid(cc_f) o = torch.sigmoid(cc_o) g = torch.tanh(cc_g) c_next = f * cell_state + i * g h_next = o * torch.tanh(c_next) return self.pool(h_next), self.pool(c_next) def init_hidden(self, batch_size): return torch.zeros(batch_size, self.hidden_dim, self.pts_num, 1 ), torch.zeros(batch_size, self.hidden_dim, self.pts_num, 1) class TestPointLSTM(nn.Module): def __init__(self): super(TestPointLSTM, self).__init__() self.lstm = PointLSTMCell(pts_num=64, in_channels=132, hidden_dim= 256, offset_dim=4, bias=True) def forward(self, inputs, hidden, cell_state): output = self.lstm(inputs, hidden, cell_state) return output[0], output[1] def get_inputs(): return [torch.rand([4, 388, 4, 4]), torch.rand([4, 4, 4, 4]), torch. rand([4, 256, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 392 x1 = xindex // 392 % 16 x2 = xindex // 6272 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 388, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 16 * x0 + 6208 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 392, tl.int64) tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-388 + x0) + 64 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr0 + (x1 + 16 * (-388 + x0) + 6208 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 - tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 16 y3 = yindex // 16 tmp0 = tl.load(in_ptr0 + (512 + x1 + 1024 * y0), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (512 + x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (x1 + 1024 * y0), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (768 + x1 + 1024 * y0), xmask & ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (768 + x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (256 + x1 + 1024 * y0), xmask & ymask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (256 + x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (y2 + 16 * x1 + 4096 * y3), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp14 = tmp12 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp7 * tmp11 tmp19 = tmp17 + tmp18 tmp20 = 1.0 tmp21 = tmp20 - tmp15 tmp22 = tmp15 * tmp21 tmp23 = libdevice.tanh(tmp19) tmp24 = tmp3 * tmp23 tl.store(out_ptr0 + (x1 + 256 * y0), tmp3, xmask & ymask) tl.store(out_ptr1 + (x1 + 256 * y0), tmp7, xmask & ymask) tl.store(out_ptr2 + (x1 + 256 * y0), tmp11, xmask & ymask) tl.store(out_ptr3 + (x1 + 256 * y0), tmp19, xmask & ymask) tl.store(out_ptr4 + (x1 + 256 * y0), tmp22, xmask & ymask) tl.store(out_ptr5 + (x1 + 256 * y0), tmp24, xmask & ymask) @triton.jit def triton_poi_fused_adaptive_max_pool2d_2(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: 
tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (256 + x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (512 + x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (768 + x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1, 1], 1, tl.int8) tmp9 = tl.full([1, 1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1, 1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1, 1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tmp17 = tl.full([1, 1], 4, tl.int32) tmp18 = tl.where((tmp16 < 0) != (tmp17 < 0), tl.where(tmp16 % tmp17 != 0, tmp16 // tmp17 - 1, tmp16 // tmp17), tmp16 // tmp17) tmp19 = tmp18 * tmp17 tmp20 = tmp16 - tmp19 tmp21 = y0 tmp22 = tmp21 + tmp18 tmp23 = tl.full([1, 1], 0, tl.int64) tmp24 = tmp23 + tmp20 tmp25 = tl.full([1, 1], 4, tl.int64) tmp26 = tmp22 * tmp25 tmp27 = tmp26 + tmp24 tl.store(out_ptr0 + (y0 + 4 * x2 + 1024 * y1), tmp6, xmask & ymask) tl.store(out_ptr1 + (x2 + 256 * y3), tmp27, xmask & ymask) @triton.jit def triton_poi_fused_sub_3(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 6208 * x1), xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr1 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 388, 4, 4), (6208, 16, 4, 1)) assert_size_stride(primals_3, (1024, 392, 1, 1), (392, 1, 1, 1)) assert_size_stride(primals_4, (1024,), (1,)) assert_size_stride(primals_5, (4, 256, 4, 4), (4096, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 392, 4, 4), (6272, 1, 1568, 392), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(25088)](primals_2, primals_1, buf0, 25088, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf3 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) buf2 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) buf4 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) buf5 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) buf11 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) buf6 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(64, 256) ](buf1, primals_4, primals_5, buf3, buf2, buf4, buf5, buf11, buf6, 64, 
256, XBLOCK=256, YBLOCK=1, num_warps=4, num_stages=1) del buf1 del primals_4 buf7 = empty_strided_cuda((4, 256, 4, 1), (1024, 4, 1, 1), torch. float32) buf8 = empty_strided_cuda((4, 256, 4, 1), (1024, 1, 256, 256), torch.int64) triton_poi_fused_adaptive_max_pool2d_2[grid(16, 256)](buf6, buf7, buf8, 16, 256, XBLOCK=256, YBLOCK=1, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 256, 4, 1), (1024, 4, 1, 1), torch. float32) buf10 = empty_strided_cuda((4, 256, 4, 1), (1024, 1, 256, 256), torch.int64) triton_poi_fused_adaptive_max_pool2d_2[grid(16, 256)](buf5, buf9, buf10, 16, 256, XBLOCK=256, YBLOCK=1, num_warps=4, num_stages=1) triton_poi_fused_sub_3[grid(256)](primals_1, primals_2, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return (buf7, buf9, primals_3, primals_5, buf0, buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf11) class PointLSTMCell(nn.Module): def __init__(self, pts_num, in_channels, hidden_dim, offset_dim, bias): super(PointLSTMCell, self).__init__() self.bias = bias self.pts_num = pts_num self.in_channels = in_channels self.hidden_dim = hidden_dim self.offset_dim = offset_dim self.pool = nn.Sequential(nn.AdaptiveMaxPool2d((None, 1))) self.conv = nn.Conv2d(in_channels=self.in_channels + self. offset_dim + self.hidden_dim, out_channels=4 * self.hidden_dim, kernel_size=(1, 1), bias=self.bias) def forward(self, input_tensor, hidden_state, cell_state): hidden_state[:, :4] -= input_tensor[:, :4] combined = torch.cat([input_tensor, hidden_state], dim=1) combined_conv = self.conv(combined) cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) i = torch.sigmoid(cc_i) f = torch.sigmoid(cc_f) o = torch.sigmoid(cc_o) g = torch.tanh(cc_g) c_next = f * cell_state + i * g h_next = o * torch.tanh(c_next) return self.pool(h_next), self.pool(c_next) def init_hidden(self, batch_size): return torch.zeros(batch_size, self.hidden_dim, self.pts_num, 1 ), torch.zeros(batch_size, self.hidden_dim, self.pts_num, 1) class TestPointLSTMNew(nn.Module): def __init__(self): super(TestPointLSTMNew, self).__init__() self.lstm = PointLSTMCell(pts_num=64, in_channels=132, hidden_dim= 256, offset_dim=4, bias=True) def forward(self, input_0, input_1, input_2): primals_3 = self.lstm.conv.weight primals_4 = self.lstm.conv.bias primals_2 = input_0 primals_1 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
evanfebrianto/pointlstm_gesture_recognition_pytorch
TestPointLSTM
false
15,329
[ "Apache-2.0" ]
69
797ccdc7da5a859e28f2a8cc7ef7118358b82cb4
https://github.com/evanfebrianto/pointlstm_gesture_recognition_pytorch/tree/797ccdc7da5a859e28f2a8cc7ef7118358b82cb4
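A hypothetical smoke test for the TestPointLSTM record above (not from the source repo). The wrapper fixes pts_num=64, in_channels=132, hidden_dim=256 and offset_dim=4, so the 1x1 conv expects 132 + 4 + 256 = 392 concatenated channels:

import torch

model = TestPointLSTM()
inputs = torch.rand(4, 388, 4, 4)      # 388 input channels + 4 hidden channels = 392 into the conv
hidden = torch.rand(4, 4, 4, 4)        # only the 4 offset channels; forward subtracts from them in place
cell_state = torch.rand(4, 256, 4, 4)
h_next, c_next = model(inputs, hidden, cell_state)
print(h_next.shape, c_next.shape)      # torch.Size([4, 256, 4, 1]) for both, after AdaptiveMaxPool2d((None, 1))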
ResidualBlock
import torch import torch.utils.data import torch import torch.nn as nn class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1): super(ResidualBlock, self).__init__() self.padding1 = nn.ReflectionPad2d(padding) self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size= kernel_size, padding=0, stride=stride) self.bn1 = nn.InstanceNorm2d(out_channels) self.prelu = nn.PReLU() self.padding2 = nn.ReflectionPad2d(padding) self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size= kernel_size, padding=0, stride=stride) self.bn2 = nn.InstanceNorm2d(out_channels) def forward(self, x): residual = x out = self.padding1(x) out = self.conv1(out) out = self.bn1(out) out = self.prelu(out) out = self.padding2(out) out = self.conv2(out) out = self.bn2(out) out += residual out = self.prelu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp23, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused__prelu_kernel_reflection_pad2d_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + 0) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp9 = tmp8 * tmp4 tmp10 = tl.where(tmp6, tmp4, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, 
RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0) tmp30 = tl.load(in_ptr2 + 0) tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp2 - tmp12 tmp25 = tmp24 * tmp23 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp32 = tmp31 * tmp27 tmp33 = tl.where(tmp29, tmp27, tmp32) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp23, xmask) tl.store(out_ptr1 + (r2 + 16 * x3), tmp33, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf4 triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2, buf6, primals_3, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused__prelu_kernel_reflection_pad2d_2[grid(576)](buf2, buf3, buf6, primals_4, buf7, 576, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) buf13 = reinterpret_tensor(buf11, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf11 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_3[ grid(16)](buf9, buf13, primals_6, primals_1, primals_4, buf10, buf14, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_6 return (buf14, primals_1, primals_2, primals_4, primals_5, buf0, buf2, buf3, buf6, buf7, buf9, buf10, buf13) class ResidualBlockNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1): super(ResidualBlockNew, self).__init__() self.padding1 = nn.ReflectionPad2d(padding) self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size= kernel_size, padding=0, stride=stride) self.bn1 = nn.InstanceNorm2d(out_channels) self.prelu = nn.PReLU() self.padding2 = nn.ReflectionPad2d(padding) self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size= kernel_size, padding=0, stride=stride) self.bn2 = nn.InstanceNorm2d(out_channels) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.prelu.weight primals_5 = self.conv2.weight primals_6 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
eungbean/CoCosNet
ResidualBlock
false
15,330
[ "MIT" ]
319
f8007d9369cc11bc04709ef02dedbbf718d74414
https://github.com/eungbean/CoCosNet/tree/f8007d9369cc11bc04709ef02dedbbf718d74414
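A minimal usage sketch for the ResidualBlock record above (hypothetical; reflection padding of 1 around each 3x3 stride-1 conv preserves the spatial size, which keeps the residual addition shape-safe):

import torch

block = ResidualBlock(in_channels=4, out_channels=4)
x = torch.rand(4, 4, 4, 4)
y = block(x)               # pad -> conv -> InstanceNorm -> PReLU, twice, then x is added back
print(y.shape)             # torch.Size([4, 4, 4, 4])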
PointLSTMCell
import torch import torch.nn as nn class PointLSTMCell(nn.Module): def __init__(self, pts_num, in_channels, hidden_dim, offset_dim, bias): super(PointLSTMCell, self).__init__() self.bias = bias self.pts_num = pts_num self.in_channels = in_channels self.hidden_dim = hidden_dim self.offset_dim = offset_dim self.pool = nn.Sequential(nn.AdaptiveMaxPool2d((None, 1))) self.conv = nn.Conv2d(in_channels=self.in_channels + self. offset_dim + self.hidden_dim, out_channels=4 * self.hidden_dim, kernel_size=(1, 1), bias=self.bias) def forward(self, input_tensor, hidden_state, cell_state): hidden_state[:, :4] -= input_tensor[:, :4] combined = torch.cat([input_tensor, hidden_state], dim=1) combined_conv = self.conv(combined) cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) i = torch.sigmoid(cc_i) f = torch.sigmoid(cc_f) o = torch.sigmoid(cc_o) g = torch.tanh(cc_g) c_next = f * cell_state + i * g h_next = o * torch.tanh(c_next) return self.pool(h_next), self.pool(c_next) def init_hidden(self, batch_size): return torch.zeros(batch_size, self.hidden_dim, self.pts_num, 1 ), torch.zeros(batch_size, self.hidden_dim, self.pts_num, 1) def get_inputs(): return [torch.rand([4, 8, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'pts_num': 4, 'in_channels': 4, 'hidden_dim': 4, 'offset_dim': 4, 'bias': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 12 x0 = xindex % 16 x2 = xindex // 192 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 128 * x2), tmp4 & xmask, other=0.0 ) tmp6 = tmp0 >= tmp3 tl.full([1], 12, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr0 + (x0 + 16 * (-8 + x1) + 128 * x2), tmp6 & xmask, other=0.0) tmp11 = tmp9 - tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 x4 = xindex % 64 x1 = xindex // 16 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (128 + x4 + 256 * x2), xmask) tmp1 = tl.load(in_ptr1 + (8 + x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (x4 + 256 * x2), xmask) tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (192 + x4 + 256 * x2), xmask) tmp9 = tl.load(in_ptr1 + (12 + x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (64 + x4 + 256 * x2), xmask) tmp13 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp14 = tmp12 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp7 * tmp11 tmp19 = tmp17 + tmp18 tmp20 = 1.0 tmp21 = tmp20 - tmp15 tmp22 = tmp15 * tmp21 tmp23 = libdevice.tanh(tmp19) tmp24 = tmp3 * tmp23 tl.store(out_ptr0 + x3, tmp3, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) tl.store(out_ptr2 + x3, tmp11, xmask) tl.store(out_ptr3 + x3, tmp19, xmask) tl.store(out_ptr4 + x3, tmp22, xmask) tl.store(out_ptr5 + x3, tmp24, xmask) @triton.jit def triton_poi_fused_adaptive_max_pool2d_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = 
tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tmp17 = tl.full([1], 4, tl.int32) tmp18 = tl.where((tmp16 < 0) != (tmp17 < 0), tl.where(tmp16 % tmp17 != 0, tmp16 // tmp17 - 1, tmp16 // tmp17), tmp16 // tmp17) tmp19 = tmp18 * tmp17 tmp20 = tmp16 - tmp19 tmp21 = x1 tmp22 = tmp21 + tmp18 tmp23 = tl.full([1], 0, tl.int64) tmp24 = tmp23 + tmp20 tmp25 = tl.full([1], 4, tl.int64) tmp26 = tmp22 * tmp25 tmp27 = tmp26 + tmp24 tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp27, xmask) @triton.jit def triton_poi_fused_sub_3(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 128 * x1), xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr1 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_3, (16, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_cat_0[grid(768)](primals_2, primals_1, buf0, 768, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 16, 4, 4), (256, 16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(256)]( buf1, primals_4, primals_5, buf3, buf2, buf4, buf5, buf11, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_4 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.int64) triton_poi_fused_adaptive_max_pool2d_2[grid(64)](buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.int64) triton_poi_fused_adaptive_max_pool2d_2[grid(64)](buf5, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) triton_poi_fused_sub_3[grid(256)](primals_1, primals_2, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return (buf7, buf9, primals_3, primals_5, buf0, buf2, buf3, buf4, buf5, buf6, buf8, buf10, buf11) class PointLSTMCellNew(nn.Module): def __init__(self, pts_num, in_channels, hidden_dim, offset_dim, bias): super(PointLSTMCellNew, self).__init__() self.bias = bias self.pts_num = pts_num self.in_channels = in_channels 
self.hidden_dim = hidden_dim self.offset_dim = offset_dim self.pool = nn.Sequential(nn.AdaptiveMaxPool2d((None, 1))) self.conv = nn.Conv2d(in_channels=self.in_channels + self. offset_dim + self.hidden_dim, out_channels=4 * self.hidden_dim, kernel_size=(1, 1), bias=self.bias) def init_hidden(self, batch_size): return torch.zeros(batch_size, self.hidden_dim, self.pts_num, 1 ), torch.zeros(batch_size, self.hidden_dim, self.pts_num, 1) def forward(self, input_0, input_1, input_2): primals_3 = self.conv.weight primals_4 = self.conv.bias primals_2 = input_0 primals_1 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
evanfebrianto/pointlstm_gesture_recognition_pytorch
PointLSTMCell
false
15,331
[ "Apache-2.0" ]
69
797ccdc7da5a859e28f2a8cc7ef7118358b82cb4
https://github.com/evanfebrianto/pointlstm_gesture_recognition_pytorch/tree/797ccdc7da5a859e28f2a8cc7ef7118358b82cb4
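A minimal step-through for the PointLSTMCell record above (hypothetical; bias=True stands in for the truthy bias=4 in the record's get_init_inputs). Note that forward mutates hidden_state in place by subtracting the first offset_dim channels of the input:

import torch

cell = PointLSTMCell(pts_num=4, in_channels=4, hidden_dim=4, offset_dim=4, bias=True)
inputs = torch.rand(4, 8, 4, 4)        # in_channels + offset_dim = 8 channels
hidden = torch.rand(4, 4, 4, 4)        # hidden_dim channels; modified in place
cell_state = torch.rand(4, 4, 4, 4)
h_next, c_next = cell(inputs, hidden, cell_state)
print(h_next.shape, c_next.shape)      # torch.Size([4, 4, 4, 1]) for both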
BerHuLoss
import torch import torch.nn as nn class BerHuLoss(nn.Module): def __init__(self, scale=0.5, eps=1e-05): super(BerHuLoss, self).__init__() self.scale = scale self.eps = eps def forward(self, pred, gt): img1 = torch.zeros_like(pred) img2 = torch.zeros_like(gt) img1 = img1.copy_(pred) img2 = img2.copy_(gt) img1 = img1[img2 > self.eps] img2 = img2[img2 > self.eps] diff = torch.abs(img1 - img2) threshold = self.scale * torch.max(diff).detach() mask = diff > threshold diff[mask] = ((img1[mask] - img2[mask]) ** 2 + threshold ** 2) / (2 * threshold + self.eps) return diff.sum() / diff.numel() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_copy_gt_zeros_like_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1e-05 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(out_ptr1 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_copy_zeros_like_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_copy_gt_zeros_like_0[grid(256)](arg1_1, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_copy_zeros_like_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, buf1, buf2 class BerHuLossNew(nn.Module): def __init__(self, scale=0.5, eps=1e-05): super(BerHuLossNew, self).__init__() self.scale = scale self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ezxzeng/FFB6D
BerHuLoss
false
15,332
[ "MIT" ]
145
fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
https://github.com/ezxzeng/FFB6D/tree/fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
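A minimal eager sketch for the BerHuLoss record above (hypothetical). The boolean-mask indexing in forward is data-dependent, which is presumably why the generated call() above only materializes the input copies and the gt > eps mask rather than the full loss; the sketch therefore exercises the eager module:

import torch

loss_fn = BerHuLoss()
pred = torch.rand(4, 4, 4, 4)
gt = torch.rand(4, 4, 4, 4)
loss = loss_fn(pred, gt)   # reverse Huber: L1 below the threshold, quadratic above it
print(loss.item())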
LogDepthL1Loss
import torch import torch.nn as nn class LogDepthL1Loss(nn.Module): def __init__(self, eps=1e-05): super(LogDepthL1Loss, self).__init__() self.eps = eps def forward(self, pred, gt): pred = pred.view(-1) gt = gt.view(-1) mask = gt > self.eps diff = torch.abs(torch.log(gt[mask]) - pred[mask]) return diff.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1e-05 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256,), (1,), torch.bool) get_raw_stream(0) triton_poi_fused_gt_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(arg1_1, (256,), (1,), 0 ), buf0, reinterpret_tensor(arg0_1, (256,), (1,), 0) class LogDepthL1LossNew(nn.Module): def __init__(self, eps=1e-05): super(LogDepthL1LossNew, self).__init__() self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ezxzeng/FFB6D
LogDepthL1Loss
false
15,333
[ "MIT" ]
145
fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
https://github.com/ezxzeng/FFB6D/tree/fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
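A minimal eager sketch for the LogDepthL1Loss record above (hypothetical; as with BerHuLoss, the masked gather is data-dependent, so the generated call() only computes the flattened views and the gt > eps mask):

import torch

loss_fn = LogDepthL1Loss()
pred = torch.rand(4, 4, 4, 4)
gt = torch.rand(4, 4, 4, 4) + 0.5   # keep gt comfortably above eps so log(gt) stays in a sane range
loss = loss_fn(pred, gt)            # mean of |log(gt) - pred| over the valid pixels
print(loss.item())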
_Multiply
from torch.nn import Module import abc import torch from torch import Tensor from torch.nn import Linear from torch.nn import MSELoss import torch.nn from torch import rand class ConverterModule(Module, abc.ABC): """Interface class for test modules for converter.""" @abc.abstractmethod def input_fn(self) ->Tensor: """Generate a fitting input for the module. Returns: an input """ return def loss_fn(self) ->Module: """The loss function. Returns: loss function """ return MSELoss() class _Multiply(ConverterModule): def __init__(self): super().__init__() self.batch_size = 2 self.in_dim = 4 out_dim = 3 self.linear = Linear(self.in_dim, out_dim) def forward(self, x): x = x * 2.5 x = self.linear(x) x = 0.5 * x x = x.multiply(3.1415) return x def input_fn(self) ->Tensor: return rand(self.batch_size, self.in_dim) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import abc from torch import Tensor from torch.nn import Linear from torch.nn import MSELoss import torch.nn from torch import rand assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 2.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 3 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 3.1415 tmp6 = tmp4 * tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (3, 4), (4, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 3), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 3), (48, 12, 3, 1), 0) del buf1 triton_poi_fused_mul_1[grid(192)](buf2, primals_3, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class ConverterModule(Module, abc.ABC): """Interface class for test modules for converter.""" @abc.abstractmethod def input_fn(self) ->Tensor: """Generate a fitting input for the module. Returns: an input """ return def loss_fn(self) ->Module: """The loss function. Returns: loss function """ return MSELoss() class _MultiplyNew(ConverterModule): def __init__(self): super().__init__() self.batch_size = 2 self.in_dim = 4 out_dim = 3 self.linear = Linear(self.in_dim, out_dim) def input_fn(self) ->Tensor: return rand(self.batch_size, self.in_dim) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
f-dangel/backpack
_Multiply
false
15,334
[ "MIT" ]
395
1da7e53ebb2c490e2b7dd9f79116583641f3cca1
https://github.com/f-dangel/backpack/tree/1da7e53ebb2c490e2b7dd9f79116583641f3cca1
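A minimal usage sketch for the _Multiply record above (hypothetical; the module's own input_fn() supplies the [batch_size=2, in_dim=4] batch, and the three scalar multiplies bracket the single Linear layer):

import torch

m = _Multiply()
x = m.input_fn()     # torch.rand(2, 4)
y = m(x)             # 2.5 * x -> Linear(4, 3) -> 0.5 * (.) -> (.) * 3.1415
print(y.shape)       # torch.Size([2, 3])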
FactorizedReduce
import torch import torch.nn as nn import torch.utils.data import torch.utils from matplotlib import cm as cm from torch.nn.parallel import * from torchvision.models import * from torchvision.datasets import * def get_norm_layer(norm, C): if norm in [None, '', 'none']: norm_layer = nn.Identity() elif norm.startswith('bn'): norm_layer = nn.BatchNorm2d(C, track_running_stats=norm.find( 'track') >= 0) else: raise NotImplementedError(norm) return norm_layer class FactorizedReduce(nn.Module): def __init__(self, C_in, C_out, norm='bn', stride=2): super(FactorizedReduce, self).__init__() assert C_out % 2 == 0 self.stride = stride self.relu = nn.ReLU(inplace=False) self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=stride, padding =0, bias=False) self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=stride, padding =0, bias=False) self.bn = get_norm_layer(norm, C_out) def forward(self, x): x = self.relu(x) out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:] if self.stride > 1 else x)], dim=1) out = self.bn(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'C_in': 4, 'C_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data import torch.utils from matplotlib import cm as cm from torch.nn.parallel import * from torchvision.models import * from torchvision.datasets import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 8 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-2 + x1) + 8 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_2(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = rindex // 4 x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0 + 16 * r2), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 
tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 2, 2, 2), (8, 4, 2, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 3, 3), (64, 16, 4, 1), 5), primals_3, stride=(2, 2), padding=(0, 0 ), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 2, 2), (8, 4, 2, 1)) buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_cat_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del buf2 buf4 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) buf5 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) buf7 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32) triton_per_fused__native_batch_norm_legit_2[grid(4)](buf3, buf4, buf5, buf7, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused__native_batch_norm_legit_3[grid(64)](buf3, buf4, buf5, primals_4, primals_5, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf5 del primals_5 return (buf8, primals_2, primals_3, primals_4, buf0, buf3, reinterpret_tensor(buf7, (4,), (1,), 0), reinterpret_tensor(buf4, ( 1, 4, 1, 1), (4, 1, 1, 1), 0)) def get_norm_layer(norm, C): if norm in [None, '', 'none']: norm_layer = nn.Identity() elif norm.startswith('bn'): norm_layer = nn.BatchNorm2d(C, track_running_stats=norm.find( 'track') >= 0) else: raise NotImplementedError(norm) return norm_layer class FactorizedReduceNew(nn.Module): def __init__(self, C_in, C_out, norm='bn', stride=2): super(FactorizedReduceNew, self).__init__() assert C_out % 2 == 0 self.stride = stride self.relu = nn.ReLU(inplace=False) self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=stride, padding =0, bias=False) self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=stride, padding =0, bias=False) self.bn = get_norm_layer(norm, C_out) def forward(self, input_0): primals_2 = self.conv_1.weight primals_3 = self.conv_2.weight primals_4 = self.bn.weight primals_5 = self.bn.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
evdcush/ppuda
FactorizedReduce
false
15,335
[ "MIT" ]
262
22783ac92207da6730ee618c953af230c5c39f28
https://github.com/evdcush/ppuda/tree/22783ac92207da6730ee618c953af230c5c39f28
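A minimal usage sketch for the FactorizedReduce record above (hypothetical). With the default stride of 2, each 1x1 conv emits C_out // 2 channels, conv_2 reads the input shifted by one pixel, and the concatenation is batch-normalized:

import torch

fr = FactorizedReduce(C_in=4, C_out=4)
x = torch.rand(4, 4, 4, 4)
y = fr(x)            # relu(x) -> cat([conv_1(x), conv_2(x[:, :, 1:, 1:])], dim=1) -> bn
print(y.shape)       # torch.Size([4, 4, 2, 2]); spatial size halved by the stride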
OfstMapL1Loss
import torch import torch.nn as nn class OfstMapL1Loss(nn.Module): def __init__(self, eps=1e-05): super().__init__() self.eps = eps def forward(self, rgb_labels, pred, gt, normalize=True, reduce=True): wgt = (rgb_labels > 1e-08).float() bs, n_kpts, c, h, w = pred.size() wgt = wgt.view(bs, 1, 1, h, w).repeat(1, n_kpts, c, 1, 1).contiguous() diff = pred - gt abs_diff = torch.abs(diff) abs_diff = wgt * abs_diff in_loss = abs_diff if normalize: in_loss = torch.sum(in_loss.view(bs, n_kpts, -1), 2) / (torch. sum(wgt.view(bs, n_kpts, -1), 2) + 0.001) if reduce: in_loss = torch.mean(in_loss) return in_loss def get_inputs(): return [torch.rand([4, 1, 1, 4, 4]), torch.rand([4, 4, 4, 4, 4]), torch .rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = xindex // 4 x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (16 * x1 + r2 % 16), xmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr1 + (r2 + 64 * x3), xmask, other=0.0) tmp5 = tl.load(in_ptr2 + (r2 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 1e-08 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp6 = tmp4 - tmp5 tmp7 = tl_math.abs(tmp6) tmp8 = tmp3 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tl.store(out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_per_fused_add_div_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = 0.001 tmp3 = tmp1 + tmp2 tmp4 = tmp0 / tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = 16.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 1, 1, 4, 4), (16, 16, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_sum_0[grid(16)](arg0_1, arg1_1, arg2_1, buf0, buf1, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_div_mean_1[grid(1)](buf3, buf0, buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, class OfstMapL1LossNew(nn.Module): def __init__(self, eps=1e-05): super().__init__() self.eps = eps def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ezxzeng/FFB6D
OfstMapL1Loss
false
15,336
[ "MIT" ]
145
fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
https://github.com/ezxzeng/FFB6D/tree/fd0ea6471532ab1dc68f9a58b52d9a63f8fb76f2
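A minimal usage sketch for the OfstMapL1Loss record above (hypothetical, mirroring the record's get_inputs() shapes; the rgb_labels mask is broadcast over keypoints and channels before the weighted L1 is normalized and reduced):

import torch

loss_fn = OfstMapL1Loss()
rgb_labels = torch.rand(4, 1, 1, 4, 4)   # per-pixel weights, thresholded at 1e-08 inside forward
pred = torch.rand(4, 4, 4, 4, 4)         # [bs, n_kpts, c, h, w]
gt = torch.rand(4, 4, 4, 4)              # broadcast against pred across the keypoint dimension
loss = loss_fn(rgb_labels, pred, gt)
print(loss.item())                       # scalar with the default normalize=True, reduce=True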
WeightNormConv2d
import torch import torch.nn as nn import torch.utils.data class WeightNormConv2d(nn.Module): def __init__(self, in_dim, out_dim, kernel_size, stride=1, padding=0, bias=True, weight_norm=True, scale=False): """Initializes a Conv2d augmented with weight normalization. (See torch.nn.utils.weight_norm for detail.) Args: in_dim: number of input channels. out_dim: number of output channels. kernel_size: size of convolving kernel. stride: stride of convolution. padding: zero-padding added to both sides of input. bias: True if include learnable bias parameters, False otherwise. weight_norm: True if apply weight normalization, False otherwise. scale: True if include magnitude parameters, False otherwise. """ super(WeightNormConv2d, self).__init__() if weight_norm: self.conv = nn.utils.weight_norm(nn.Conv2d(in_dim, out_dim, kernel_size, stride=stride, padding=padding, bias=bias)) if not scale: self.conv.weight_g.data = torch.ones_like(self.conv. weight_g.data) self.conv.weight_g.requires_grad = False else: self.conv = nn.Conv2d(in_dim, out_dim, kernel_size, stride= stride, padding=padding, bias=bias) def forward(self, x): """Forward pass. Args: x: input tensor. Returns: transformed tensor. """ return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2, primals_1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(16)](buf4, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf4, buf2, primals_1, primals_2, primals_4, buf1, buf2 class WeightNormConv2dNew(nn.Module): def __init__(self, in_dim, out_dim, kernel_size, stride=1, padding=0, bias=True, weight_norm=True, scale=False): """Intializes a Conv2d augmented with weight normalization. (See torch.nn.utils.weight_norm for detail.) Args: in_dim: number of input channels. out_dim: number of output channels. kernel_size: size of convolving kernel. stride: stride of convolution. padding: zero-padding added to both sides of input. bias: True if include learnable bias parameters, False otherwise. weight_norm: True if apply weight normalization, False otherwise. scale: True if include magnitude parameters, False otherwise. 
""" super(WeightNormConv2dNew, self).__init__() if weight_norm: self.conv = nn.utils.weight_norm(nn.Conv2d(in_dim, out_dim, kernel_size, stride=stride, padding=padding, bias=bias)) if not scale: self.conv.weight_g.data = torch.ones_like(self.conv. weight_g.data) self.conv.weight_g.requires_grad = False else: self.conv = nn.Conv2d(in_dim, out_dim, kernel_size, stride= stride, padding=padding, bias=bias) def forward(self, input_0): primals_3 = self.conv.bias primals_1 = self.conv.weight_g primals_2 = self.conv.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
eyalbetzalel/GlowGAN
WeightNormConv2d
false
15337
[ "MIT" ]
54
144b8fef60d9dc38ca66c178a18c0c9a2a17c23e
https://github.com/eyalbetzalel/GlowGAN/tree/144b8fef60d9dc38ca66c178a18c0c9a2a17c23e
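# A minimal parity-check sketch for rows in this corpus (not part of the
# dataset itself): it assumes a CUDA device and that the row's module pair
# (here WeightNormConv2d / WeightNormConv2dNew) and its get_init_inputs() /
# get_inputs() helpers are in scope; the tolerance is an arbitrary choice.
import torch

def check_parity(ref_cls, opt_cls, get_init_inputs, get_inputs, atol=1e-5):
    args, kwargs = get_init_inputs()
    ref = ref_cls(*args, **kwargs).cuda().eval()
    opt = opt_cls(*args, **kwargs).cuda().eval()
    opt.load_state_dict(ref.state_dict())  # share parameters so outputs are comparable
    inputs = [t.cuda() for t in get_inputs()]
    with torch.no_grad():
        torch.testing.assert_close(ref(*inputs), opt(*inputs), atol=atol, rtol=0)

# e.g. check_parity(WeightNormConv2d, WeightNormConv2dNew, get_init_inputs, get_inputs)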
multi_scale_spatial
import torch import torch.nn as nn class multi_scale_spatial(nn.Module): def __init__(self, limb_blocks): super(multi_scale_spatial, self).__init__() (self.left_arm, self.right_arm, self.left_leg, self.right_leg, self.head_spine) = limb_blocks self.maxpool1 = nn.AdaptiveMaxPool2d((1, 20)) self.maxpool2 = nn.AdaptiveMaxPool2d((1, 20)) self.maxpool3 = nn.AdaptiveMaxPool2d((1, 20)) self.maxpool4 = nn.AdaptiveMaxPool2d((1, 20)) self.maxpool5 = nn.AdaptiveMaxPool2d((1, 20)) self.avgpool = nn.AdaptiveAvgPool2d((1, 20)) def forward(self, x): ll = self.maxpool1(x[:, :, self.left_leg]) rl = self.maxpool2(x[:, :, self.right_leg]) la = self.maxpool3(x[:, :, self.left_arm]) ra = self.maxpool4(x[:, :, self.right_arm]) hs = self.maxpool5(x[:, :, self.head_spine]) multi_spatial = torch.cat((ll, rl, la, ra, hs), dim=-2) x = self.avgpool(multi_spatial) return x def get_inputs(): return [torch.rand([4, 4, 5, 4])] def get_init_inputs(): return [[], {'limb_blocks': [4, 4, 4, 4, 4]}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = xindex // 20 tmp0 = tl.full([1], 0, tl.int64) tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 // 5 tmp4 = (23 + 4 * x0) // 20 tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (16 + 80 * x1 + x0 // 5), tmp6 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp8 = 1 + x0 // 5 tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (17 + 80 * x1 + x0 // 5), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = tl.full([1], 1, tl.int64) tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (36 + 80 * x1 + x0 // 5), tmp15 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp17 = triton_helpers.maximum(tmp16, tmp12) tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (37 + 80 * x1 + x0 // 5), tmp18 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp20 = triton_helpers.maximum(tmp19, tmp17) tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 < tmp1 tmp23 = tmp22 & tmp5 tmp24 = tl.load(in_ptr0 + (56 + 80 * x1 + x0 // 5), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp20) tmp26 = tmp22 & tmp9 tmp27 = tl.load(in_ptr0 + (57 + 80 * x1 + x0 // 5), tmp26 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tl.full([1], 3, tl.int64) tmp30 = tmp29 < tmp1 tmp31 = tmp30 & tmp5 tmp32 = tl.load(in_ptr0 + (76 + 80 * x1 + x0 // 5), tmp31 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp33 = triton_helpers.maximum(tmp32, tmp28) tmp34 = tmp30 & tmp9 tmp35 = tl.load(in_ptr0 + (77 + 80 * x1 + x0 // 5), tmp34 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp36 = triton_helpers.maximum(tmp35, tmp33) tl.store(out_ptr0 + (x0 + 100 * x1), tmp36, xmask) tl.store(out_ptr1 + (x0 + 100 * x1), tmp36, xmask) tl.store(out_ptr2 + (x0 + 100 * x1), tmp36, xmask) tl.store(out_ptr3 + (x0 + 100 * x1), tmp36, xmask) tl.store(out_ptr4 + (x0 + 100 * x1), tmp36, xmask) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = xindex // 20 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 100 * x1), xmask) tmp1 = tl.load(in_ptr0 + (20 + x0 + 100 * x1), xmask) tmp3 = tl.load(in_ptr0 + (40 + x0 + 100 * x1), xmask) tmp5 = tl.load(in_ptr0 + (60 + x0 + 100 * x1), xmask) tmp7 = tl.load(in_ptr0 + (80 + x0 + 100 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp9 = 0.2 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 5, 4), (80, 20, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf5 = empty_strided_cuda((4, 5, 20), (100, 20, 1), torch.float32) buf0 = reinterpret_tensor(buf5, (4, 1, 20), (100, 20, 1), 0) buf1 = reinterpret_tensor(buf5, (4, 1, 20), (100, 20, 1), 20) buf2 = reinterpret_tensor(buf5, (4, 1, 20), (100, 20, 1), 40) buf3 = reinterpret_tensor(buf5, (4, 1, 20), (100, 20, 1), 60) buf4 = reinterpret_tensor(buf5, (4, 1, 20), (100, 20, 1), 80) get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0[grid(80)](arg0_1, buf0, buf1, buf2, buf3, buf4, 80, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf6 = empty_strided_cuda((4, 1, 20), (20, 20, 1), torch.float32) triton_poi_fused__adaptive_avg_pool2d_1[grid(80)](buf5, buf6, 80, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del buf2 del buf3 del buf4 del buf5 return buf6, class multi_scale_spatialNew(nn.Module): def __init__(self, limb_blocks): super(multi_scale_spatialNew, self).__init__() (self.left_arm, self.right_arm, self.left_leg, self.right_leg, self .head_spine) = limb_blocks self.maxpool1 = nn.AdaptiveMaxPool2d((1, 20)) self.maxpool2 = nn.AdaptiveMaxPool2d((1, 20)) self.maxpool3 = nn.AdaptiveMaxPool2d((1, 20)) self.maxpool4 = nn.AdaptiveMaxPool2d((1, 20)) self.maxpool5 = nn.AdaptiveMaxPool2d((1, 20)) self.avgpool = nn.AdaptiveAvgPool2d((1, 20)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
fabro66/Online-Skeleton-based-Action-Recognition
multi_scale_spatial
false
15338
[ "MIT" ]
63
de00cbf17ceea98a7d07f68bbbd966bfd02d3b40
https://github.com/fabro66/Online-Skeleton-based-Action-Recognition/tree/de00cbf17ceea98a7d07f68bbbd966bfd02d3b40
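# Usage sketch for the record above (not part of the dataset): limb_blocks is
# meant to hold five groups of joint indices. The harness passes scalar index
# 4 for every limb, which the fused kernel bakes in as fixed load offsets, so
# the grouping below is illustrative only, not from the source repo.
import torch

limbs = ([0], [1], [2], [3], [4])  # left_arm, right_arm, left_leg, right_leg, head_spine
net = multi_scale_spatial(limbs)
x = torch.rand(4, 4, 5, 4)         # (N, C, joints, T)
assert net(x).shape == (4, 4, 1, 20)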
LayerNormGRUCell
import torch from typing import Optional import torch.nn.functional as F from torch import nn import torch.utils.data import torch.nn from torch.nn import RNNCellBase import torch.multiprocessing from torch.nn import Identity class LayerNormGRUCell(RNNCellBase): """ Implements GRUCell with layer normalisation and zone-out on top. It inherits the base RNN cell whose trainable weight matrices are used. References: [1] Ba, Jimmy Lei, Jamie Ryan Kiros, and Geoffrey E. Hinton. "Layer Normalization." (2016). [2] Krueger, David, et al. "Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations." (2016). :param input_size: Number of input features to the cell :param hidden_size: Number of hidden states in the cell :param use_layer_norm: If set to True, layer normalisation is applied to reset, update and new tensors before activation. :param dropout: Dropout probability for the hidden states [0,1] """ def __init__(self, input_size: 'int', hidden_size: 'int', use_layer_norm: 'bool'=False, dropout: 'float'=0.0): super(LayerNormGRUCell, self).__init__(input_size, hidden_size, bias=False, num_chunks=3) self.dropout = dropout self.ln_r = nn.LayerNorm(self.hidden_size) if use_layer_norm else Identity() self.ln_z = nn.LayerNorm(self.hidden_size) if use_layer_norm else Identity() self.ln_n = nn.LayerNorm(self.hidden_size) if use_layer_norm else Identity() def forward(self, input: 'torch.Tensor', hx: 'Optional[torch.Tensor]'=None) ->torch.Tensor: if hx is None: hx = input.new_zeros(size=(input.size(0), self.hidden_size), requires_grad=False) ih = input.mm(self.weight_ih.t()) hh = hx.mm(self.weight_hh.t()) i_r, i_z, i_n = ih.chunk(3, dim=1) h_r, h_z, h_n = hh.chunk(3, dim=1) r = torch.sigmoid(self.ln_r(i_r + h_r)) z = torch.sigmoid(self.ln_z(i_z + h_z)) n = torch.tanh(self.ln_n(i_n + r * h_n)) new_h = (torch.tensor(1.0) - z) * n + z * hx if self.dropout > 0.0: bernoulli_mask = F.dropout(torch.ones_like(new_h), p=self.dropout, training=bool(self.training)) new_h = bernoulli_mask * new_h + (torch.tensor(1.0) - bernoulli_mask) * hx return new_h class Identity(nn.Module): """ Implements an identity torch module where input is passed as it is to output. There are no parameters in the module. """ def __init__(self) ->None: super(Identity, self).__init__() def forward(self, input: 'torch.Tensor') ->torch.Tensor: return input def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data import torch.nn from torch.nn import RNNCellBase import torch.multiprocessing from torch.nn import Identity assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_lift_fresh_mul_sigmoid_sub_tanh_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0 + 12 * x1), xmask) tmp4 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp5 = tl.load(in_ptr1 + (x0 + 12 * x1), xmask) tmp8 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp9 = tl.load(in_ptr1 + (8 + x0 + 12 * x1), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp7 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.tanh(tmp11) tmp13 = 1.0 tmp14 = tmp13 - tmp3 tmp15 = tmp14 * tmp12 tmp16 = 0.0 tmp17 = tmp3 * tmp16 tmp18 = tmp15 + tmp17 tl.store(out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) tl.store(out_ptr2 + x2, tmp12, xmask) tl.store(out_ptr3 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf2) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_lift_fresh_mul_sigmoid_sub_tanh_1[grid(16)](buf1, buf2, buf4, buf3, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 return buf6, primals_1, buf0, reinterpret_tensor(buf2, (4, 4), (12, 1), 8 ), buf3, buf4, buf5 class LayerNormGRUCellNew(RNNCellBase): """ Implements GRUCell with layer normalisation and zone-out on top. It inherits the base RNN cell whose trainable weight matrices are used. References: [1] Ba, Jimmy Lei, Jamie Ryan Kiros, and Geoffrey E. Hinton. "Layer Normalization." (2016). [2] Krueger, David, et al. 
"Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations." (2016). :param input_size: Number of input features to the cell :param hidden_size: Number of hidden states in the cell :param use_layer_norm: If set to True, layer normalisation is applied to reset, update and new tensors before activation. :param dropout: Dropout probability for the hidden states [0,1] """ def __init__(self, input_size: 'int', hidden_size: 'int', use_layer_norm: 'bool'=False, dropout: 'float'=0.0): super(LayerNormGRUCellNew, self).__init__(input_size, hidden_size, bias=False, num_chunks=3) self.dropout = dropout self.ln_r = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() self.ln_z = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() self.ln_n = nn.LayerNorm(self.hidden_size ) if use_layer_norm else Identity() def forward(self, input_0): primals_2 = self.weight_ih primals_3 = self.weight_hh primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0] class Identity(nn.Module): """ Implements an identity torch module where input is passed as it is to output. There are no parameters in the module. """ def __init__(self) ->None: super(Identity, self).__init__() def forward(self, input: 'torch.Tensor') ->torch.Tensor: return input
faz1993/InnerEye-DeepLearning
LayerNormGRUCell
false
15339
[ "MIT" ]
402
fb258d5c9a3ba18565b5a67e7ac1f00127d9ecb9
https://github.com/faz1993/InnerEye-DeepLearning/tree/fb258d5c9a3ba18565b5a67e7ac1f00127d9ecb9
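# Caveat sketch (an observation about the compiled graph above, not part of
# the dataset): call() always materializes hx as zeros via
# triton_poi_fused_new_zeros_0, so LayerNormGRUCellNew covers only the hx=None
# case of the original forward(); use_layer_norm=False and dropout=0.0 are
# likewise baked in. Exercising that default path:
import torch

cell = LayerNormGRUCellNew(input_size=4, hidden_size=4).cuda()
x = torch.rand(4, 4, device='cuda')
assert cell(x).shape == (4, 4)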
LearnedPositionalEncoding
import torch import torch.nn as nn import torch.cuda import torch.distributed class LearnedPositionalEncoding(nn.Module): def __init__(self, context_size, embedding_dim, dropout=0): super(LearnedPositionalEncoding, self).__init__() self.pe = nn.Embedding(context_size, embedding_dim) self.dropout = nn.Dropout(p=dropout) def forward(self, emb, step=None, offset=None): """Embed inputs. Args: emb (FloatTensor): Sequence of word vectors ``(seq_len, batch_size, self.dim)`` step (int or NoneType): If stepwise (``seq_len = 1``), use the encoding for this position. """ if step is None: position_ids = torch.arange(0, emb.shape[0], dtype=torch.long, device=emb.device) else: position_ids = torch.arange(step, step + 1, dtype=torch.long, device=emb.device) position_ids = position_ids.unsqueeze(1).repeat(1, emb.shape[1]) if offset is not None: offset = offset.unsqueeze(0) position_ids += offset pe_vals = self.pe(position_ids) emb = emb + pe_vals emb = self.dropout(emb) return emb def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'context_size': 4, 'embedding_dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_repeat_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = x1 tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_repeat_0[grid(16)](buf0, 16, XBLOCK=16, num_warps= 1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_embedding_1[grid(256)](primals_1, primals_2, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf1, buf0 class LearnedPositionalEncodingNew(nn.Module): def __init__(self, context_size, embedding_dim, dropout=0): super(LearnedPositionalEncodingNew, self).__init__() self.pe = nn.Embedding(context_size, embedding_dim) self.dropout = nn.Dropout(p=dropout) def forward(self, input_0): primals_2 = self.pe.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
fangleai/encoder-agnostic-adaptation
LearnedPositionalEncoding
false
15340
[ "MIT" ]
70
d917e654152df202dd35bba49c409c3ecd24eaf7
https://github.com/fangleai/encoder-agnostic-adaptation/tree/d917e654152df202dd35bba49c409c3ecd24eaf7
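# Caveat sketch (an observation about the compiled graph above, not part of
# the dataset): triton_poi_fused_repeat_0 writes position_ids as the plain row
# index, i.e. it hard-codes the step=None, offset=None branch of the original
# forward(). Exercising only that default path:
import torch

pe = LearnedPositionalEncodingNew(context_size=4, embedding_dim=4).cuda()
emb = torch.rand(4, 4, 4, 4, device='cuda')
out = pe(emb)  # matches the eager module with step=None, offset=None (dropout p=0)
assert out.shape == emb.shape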
MLP
import math import torch import torch.nn as nn import torch.cuda import torch.distributed def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class MLP(nn.Module): def __init__(self, n_embd, n_state, dropout): super(MLP, self).__init__() self.c_fc = nn.Linear(n_embd, n_state) self.c_proj = nn.Linear(n_state, n_embd) self.act = gelu self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): self.c_fc.weight.data.normal_(std=0.02) self.c_fc.bias.data.zero_() self.c_proj.weight.data.normal_(std=0.02) self.c_proj.bias.data.zero_() def forward(self, x): """ x is input, [T, B, n_embd] """ h = self.dropout_1(self.act(self.c_fc(x))) h2 = self.dropout_2(self.c_proj(h)) return h2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_embd': 4, 'n_state': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class MLPNew(nn.Module): def __init__(self, n_embd, n_state, dropout): super(MLPNew, self).__init__() self.c_fc = nn.Linear(n_embd, n_state) self.c_proj = nn.Linear(n_state, n_embd) self.act = gelu self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): self.c_fc.weight.data.normal_(std=0.02) self.c_fc.bias.data.zero_() self.c_proj.weight.data.normal_(std=0.02) self.c_proj.bias.data.zero_() def forward(self, input_0): primals_1 = self.c_fc.weight primals_2 = self.c_fc.bias primals_4 = self.c_proj.weight primals_5 = self.c_proj.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
fangleai/encoder-agnostic-adaptation
MLP
false
15341
[ "MIT" ]
70
d917e654152df202dd35bba49c409c3ecd24eaf7
https://github.com/fangleai/encoder-agnostic-adaptation/tree/d917e654152df202dd35bba49c409c3ecd24eaf7
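# The gelu() helper in the record above is the tanh approximation of GELU; a
# quick equivalence sketch against PyTorch's built-in variant (assumes the
# record's gelu() is in scope and torch>=1.12, where approximate= exists):
import torch
import torch.nn.functional as F

x = torch.randn(100)
torch.testing.assert_close(gelu(x), F.gelu(x, approximate='tanh'))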
KnowledgeDistillationLoss
import torch import torch.nn as nn class KnowledgeDistillationLoss(nn.Module): def __init__(self, reduction='mean', alpha=1.0): super().__init__() self.reduction = reduction self.alpha = alpha def forward(self, inputs, targets, mask=None): inputs = inputs.narrow(1, 0, targets.shape[1]) outputs = torch.log_softmax(inputs, dim=1) labels = torch.softmax(targets * self.alpha, dim=1) loss = (outputs * labels).mean(dim=1) if mask is not None: loss = loss * mask.float() if self.reduction == 'mean': outputs = -torch.mean(loss) elif self.reduction == 'sum': outputs = -torch.sum(loss) else: outputs = -loss return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused__log_softmax__softmax_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + x3, xmask) tmp15 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = tmp14 / tmp21 tmp23 
= tmp13 * tmp22 tl.store(out_ptr0 + x3, tmp23, xmask) @triton.jit def triton_per_fused_mean_neg_3(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 64.0 tmp13 = tmp11 / tmp12 tmp14 = -tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](arg1_1, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_mul_2[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_mean_neg_3[grid(1)](buf4, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf4, class KnowledgeDistillationLossNew(nn.Module): def __init__(self, reduction='mean', alpha=1.0): super().__init__() self.reduction = reduction self.alpha = alpha def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
fcdl94/ModelingTheBackground
KnowledgeDistillationLoss
false
15342
[ "MIT" ]
105
1c589833ce5c1a7446469d4602ceab2cdeac1b0e
https://github.com/fcdl94/ModelingTheBackground/tree/1c589833ce5c1a7446469d4602ceab2cdeac1b0e
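# Equivalence sketch for the record above (assumes the default
# reduction='mean', alpha=1.0 and no mask, which is what the fused kernels
# implement; with equal shapes the narrow() in the original is a no-op):
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
t = torch.rand(4, 4, 4, 4, device='cuda')
ref = -(torch.log_softmax(x, dim=1) * torch.softmax(t, dim=1)).mean(dim=1).mean()
opt = KnowledgeDistillationLossNew()(x, t)
torch.testing.assert_close(ref, opt)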
ActNorm
import torch import torch.utils.data class ActNorm(torch.nn.Module): def __init__(self, nsq, data_init=True): super(ActNorm, self).__init__() self.initialized = not data_init self.m = torch.nn.Parameter(torch.zeros(1, nsq, 1)) self.logs = torch.nn.Parameter(torch.zeros(1, nsq, 1)) return def forward(self, h): if not self.initialized: _sbatch, nsq, _lchunk = h.size() flatten = h.permute(1, 0, 2).contiguous().view(nsq, -1).data self.m.data = -flatten.mean(1).view(1, nsq, 1) self.logs.data = torch.log(1 / (flatten.std(1) + 1e-07)).view(1, nsq, 1) self.initialized = True h = torch.exp(self.logs) * (h + self.m) logdet = self.logs.sum() * h.size(2) return h, logdet def reverse(self, h): return h * torch.exp(-self.logs) - self.m def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'nsq': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp4 = tmp2 + tmp3 tmp5 = tmp1 * tmp4 tl.store(out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_per_fused_mul_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 4.0 tmp5 = tmp3 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_0[grid(64)](primals_1, primals_3, primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_mul_sum_1[grid(1)](buf2, primals_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) return buf0, buf2, primals_1, primals_2, primals_3 class ActNormNew(torch.nn.Module): def __init__(self, nsq, data_init=True): super(ActNormNew, self).__init__() self.initialized = not data_init self.m = torch.nn.Parameter(torch.zeros(1, nsq, 1)) self.logs = torch.nn.Parameter(torch.zeros(1, nsq, 1)) return def reverse(self, h): return h * torch.exp(-self.logs) - self.m def forward(self, input_0): primals_1 = self.m primals_2 = self.logs primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
entn-at/blow
ActNorm
false
15343
[ "Apache-2.0" ]
147
b597286b24c7ea88c8d9408f9aa35aa8df2ebe11
https://github.com/entn-at/blow/tree/b597286b24c7ea88c8d9408f9aa35aa8df2ebe11
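# Round-trip sketch for the record above: the compiled call() never runs the
# lazy data-dependent initialization branch, so data_init=False is assumed
# here (data_init=True would leave self.initialized False but have no effect
# on ActNormNew).
import torch

an = ActNormNew(nsq=4, data_init=False).cuda()
h = torch.rand(4, 4, 4, device='cuda')
y, logdet = an(h)
torch.testing.assert_close(an.reverse(y), h)  # reverse() inverts forward()
assert logdet.shape == ()                     # scalar log-determinant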
PosEnc
import torch import torch.nn as nn import torch.utils.data import torch.utils from matplotlib import cm as cm from torch.nn.parallel import * from torchvision.models import * from torchvision.datasets import * class PosEnc(nn.Module): def __init__(self, C, ks): super().__init__() self.weight = nn.Parameter(torch.randn(1, C, ks, ks)) def forward(self, x): return x + self.weight def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'C': 4, 'ks': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data import torch.utils from matplotlib import cm as cm from torch.nn.parallel import * from torchvision.models import * from torchvision.datasets import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class PosEncNew(nn.Module): def __init__(self, C, ks): super().__init__() self.weight = nn.Parameter(torch.randn(1, C, ks, ks)) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
evdcush/ppuda
PosEnc
false
15344
[ "MIT" ]
262
22783ac92207da6730ee618c953af230c5c39f28
https://github.com/evdcush/ppuda/tree/22783ac92207da6730ee618c953af230c5c39f28
LearnedUpsampling1d
import torch from torch import nn class LearnedUpsampling1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True): super().__init__() self.conv_t = nn.ConvTranspose1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=kernel_size, bias=False) if bias: self.bias = nn.Parameter(torch.FloatTensor(out_channels, kernel_size)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): self.conv_t.reset_parameters() if self.bias is not None: nn.init.constant_(self.bias, 0) def forward(self, input): batch_size, _, length = input.size() kernel_size, = self.conv_t.kernel_size bias = self.bias.unsqueeze(0).unsqueeze(2).expand(batch_size, self.conv_t.out_channels, length, kernel_size).contiguous().view(batch_size, self.conv_t.out_channels, length * kernel_size) return self.conv_t(input) + bias def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (4 * x1 + x0 % 4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(4,), padding=(0,), dilation=(1,), transposed=True, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16), (64, 16, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class LearnedUpsampling1dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True): super().__init__() self.conv_t = nn.ConvTranspose1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=kernel_size, bias=False) if bias: self.bias = nn.Parameter(torch.FloatTensor(out_channels, kernel_size)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): self.conv_t.reset_parameters() if self.bias is not None: nn.init.constant_(self.bias, 0) def forward(self, input_0): primals_2 = self.bias primals_1 = self.conv_t.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
fdb/samplernn-pytorch
LearnedUpsampling1d
false
15345
[ "MIT" ]
259
87ce71cc2cf26601a271648597f198df33059f96
https://github.com/fdb/samplernn-pytorch/tree/87ce71cc2cf26601a271648597f198df33059f96
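# Shape sketch for the record above (not part of the dataset): stride equals
# kernel_size, so the transposed convolution tiles each timestep into
# kernel_size outputs (length 4 -> 16 here), with a learned per-position bias.
import torch

up = LearnedUpsampling1dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
assert up(x).shape == (4, 4, 16)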
MinibatchStdDev
import torch import torch.utils.cpp_extension class MinibatchStdDev(torch.nn.Module): def __init__(self, group_size, num_channels=1): super().__init__() self.group_size = group_size self.num_channels = num_channels def forward(self, x): N, C, H, W = x.shape G = self.group_size if N % self.group_size == 0 else N F = self.num_channels c = C // F y = x.reshape(G, -1, F, c, H, W) y = y - y.mean(dim=0) y = y.square().mean(dim=0) y = (y + 1e-08).sqrt() y = y.mean(dim=[2, 3, 4]) y = y.reshape(-1, F, 1, 1) y = y.repeat(G, 1, H, W) x = torch.cat([x, y], dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'group_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp28, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class MinibatchStdDevNew(torch.nn.Module): def __init__(self, group_size, num_channels=1): super().__init__() self.group_size = group_size self.num_channels = num_channels def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
STomoya/animeface
MinibatchStdDev
false
15,346
[ "MIT" ]
61
37b3cd26097d7874559d4c152e41e5712b7a1a42
https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
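# Shape sketch for the record above (not part of the dataset): the layer
# appends num_channels (default 1) feature maps of per-group standard
# deviation statistics, so C grows from 4 to 5 for the harness input.
import torch

msd = MinibatchStdDevNew(group_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert msd(x).shape == (4, 5, 4, 4)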
SimpleFusionGenerator
import torch import torch.nn as nn import torch.cuda import torch.distributed class SimpleFusionGenerator(nn.Module): def __init__(self, decoder_input_size, lm_input_size, output_size): super(SimpleFusionGenerator, self).__init__() self.decoder_linear = nn.Linear(decoder_input_size, output_size) self.lm_linear = nn.Linear(lm_input_size, output_size, bias=False) self.gen_func = nn.LogSoftmax(dim=-1) def forward(self, decoder_hidden, lm_hidden): """ Compute a log-probability distribution over the target vocabulary by summing the decoder and language-model logits (simple fusion). Args: decoder_hidden (FloatTensor): hidden outputs ``(batch x tlen, input_size)`` lm_hidden (FloatTensor): hidden outputs ``(batch x tlen, input_size)`` """ decoder_logits = self.decoder_linear(decoder_hidden) lm_logits = self.lm_linear(lm_hidden) logits = (decoder_logits + lm_logits).float() log_probs = self.gen_func(logits) return log_probs def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'decoder_input_size': 4, 'lm_input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = triton_helpers.maximum(tmp5, tmp11) tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = triton_helpers.maximum(tmp19, tmp25) tmp27 = tmp5 - tmp26 tmp28 = tl_math.exp(tmp27) tmp29 = tmp11 - tmp26 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tmp32 = tmp18 - tmp26 tmp33 = tl_math.exp(tmp32) tmp34 = tmp31 + tmp33 tmp35 = tmp25 - tmp26 tmp36 = tl_math.exp(tmp35) tmp37 = tmp34 + tmp36 tl.store(out_ptr0 + x0, tmp26, xmask) tl.store(out_ptr1 + x0, tmp37, xmask) @triton.jit def triton_poi_fused__log_softmax_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = tl_math.log(tmp7) tmp9 = tmp6 - tmp8 tl.store(in_out_ptr0 + x2, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_add_0[grid(64)](buf0, primals_2, buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__log_softmax_add_1[grid(256)](buf4, primals_2, buf1, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf2 del buf3 del primals_2 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), buf4 class SimpleFusionGeneratorNew(nn.Module): def __init__(self, decoder_input_size, lm_input_size, output_size): super(SimpleFusionGeneratorNew, self).__init__() self.decoder_linear = nn.Linear(decoder_input_size, output_size) self.lm_linear = nn.Linear(lm_input_size, output_size, bias=False) self.gen_func = nn.LogSoftmax(dim=-1) def forward(self, input_0, input_1): primals_1 = self.decoder_linear.weight primals_2 = self.decoder_linear.bias primals_4 = self.lm_linear.weight primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
fangleai/encoder-agnostic-adaptation
SimpleFusionGenerator
false
15347
[ "MIT" ]
70
d917e654152df202dd35bba49c409c3ecd24eaf7
https://github.com/fangleai/encoder-agnostic-adaptation/tree/d917e654152df202dd35bba49c409c3ecd24eaf7
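# Sanity sketch for the record above (not part of the dataset): the fused
# kernels fold the two projections, the logit sum, and the log-softmax into
# one pass, so the output should exponentiate to a normalized distribution.
import torch

gen = SimpleFusionGeneratorNew(4, 4, 4).cuda()
dec_h = torch.rand(4, 4, 4, 4, device='cuda')
lm_h = torch.rand(4, 4, 4, 4, device='cuda')
probs = gen(dec_h, lm_h).exp()
torch.testing.assert_close(probs.sum(-1), torch.ones(4, 4, 4, device='cuda'))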
PointwiseFeedForward
import torch import torch.nn as nn class PointwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_hid, d_inner_hid=None, d_out=None, dropout=0): super(PointwiseFeedForward, self).__init__() if d_inner_hid is None: d_inner_hid = d_hid if d_out is None: d_out = d_inner_hid self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1) self.w_2 = nn.Conv1d(d_inner_hid, d_out, 1) self.dropout = nn.Dropout(dropout) self.relu = nn.ReLU() def forward(self, x): output = self.relu(self.w_1(x.transpose(1, 2))) output = self.w_2(output).transpose(2, 1) output = self.dropout(output) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) del buf0 buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0 ), primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), ( 16, 1, 4), 0), buf2 class PointwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_hid, d_inner_hid=None, d_out=None, 
dropout=0): super(PointwiseFeedForwardNew, self).__init__() if d_inner_hid is None: d_inner_hid = d_hid if d_out is None: d_out = d_inner_hid self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1) self.w_2 = nn.Conv1d(d_inner_hid, d_out, 1) self.dropout = nn.Dropout(dropout) self.relu = nn.ReLU() def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
fhamborg/NewsMTSC
PointwiseFeedForward
false
15348
[ "MIT" ]
46
5a8f88d7fbb921090e984cc378b02d75524c1025
https://github.com/fhamborg/NewsMTSC/tree/5a8f88d7fbb921090e984cc378b02d75524c1025
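# Layout sketch for the record above (not part of the dataset): the input
# arrives channels-last (B, L, C); the module transposes to (B, C, L) for the
# two 1x1 convolutions and transposes back, so input and output shapes match.
import torch

ffn = PointwiseFeedForwardNew(d_hid=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
assert ffn(x).shape == x.shape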
Noise
import torch import torch.utils.data import torch.nn as nn class Noise(nn.Module): def __init__(self): super(Noise, self).__init__() def forward(self, input, train=False): input = input * 255.0 if train: noise = torch.nn.init.uniform_(torch.zeros_like(input), -0.5, 0.5) output = input + noise output = torch.clamp(output, 0, 255.0) else: output = input.round() * 1.0 output = torch.clamp(output, 0, 255.0) return output / 255.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_mul_round_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 255.0 tmp2 = tmp0 * tmp1 tmp3 = libdevice.nearbyint(tmp2) tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = triton_helpers.minimum(tmp7, tmp1) tmp9 = 0.00392156862745098 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_mul_round_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NoiseNew(nn.Module): def __init__(self): super(NoiseNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
felixcheng97/IICNet
Noise
false
15,349
[ "MIT" ]
50
2648d7148c01a03226128c24a285c4a52e2b5aa0
https://github.com/felixcheng97/IICNet/tree/2648d7148c01a03226128c24a285c4a52e2b5aa0
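# Inference-path sketch for the record above (not part of the dataset): the
# fused kernel implements only the train=False branch (round, then clamp);
# the uniform-noise training branch is not compiled.
import torch

n = NoiseNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(n(x), torch.clamp((x * 255.0).round(), 0, 255.0) / 255.0)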
Decoder
import torch import torch.nn as nn class Decoder(nn.Module): def __init__(self, latent_size, out_size): super().__init__() self.linear1 = nn.Linear(latent_size, int(out_size / 4)) self.linear2 = nn.Linear(int(out_size / 4), int(out_size / 2)) self.linear3 = nn.Linear(int(out_size / 2), out_size) self.relu = nn.ReLU(True) self.sigmoid = nn.Sigmoid() def forward(self, z): out = self.linear1(z) out = self.relu(out) out = self.linear2(out) out = self.relu(out) out = self.linear3(out) w = self.sigmoid(out) return w def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_size': 4, 'out_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + x0, tmp5, xmask)
    tl.store(out_ptr0 + x0, tmp7, xmask)

@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x0 % 4 // 4) + 16 * ((4 * (x0 //
        4 % 4) + x0 % 4) // 16)), xmask)
    tl.store(out_ptr0 + x0, tmp0, xmask)

@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 2
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr0 + x4, tmp6, xmask)

@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 2 * x1 + 8 * (x1 % 4 // 4) + 32 * ((4 *
        (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)

@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)

def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (2, 1), (1, 1))
    assert_size_stride(primals_5, (2,), (1,))
    assert_size_stride(primals_6, (4, 2), (2, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1),
            0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf0
        buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
            primals_2, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        triton_poi_fused_view_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf1
        buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (1, 2), (1,
            1), 0), out=buf3)
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0)
        del buf3
        buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_2[grid(128)](buf4,
            primals_5, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        triton_poi_fused_view_3[grid(128)](buf4, buf5, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf4
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (2, 4), (1,
            2), 0), out=buf6)
        buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf6
        triton_poi_fused_sigmoid_4[grid(256)](buf7, primals_7, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
    return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2,
        buf5, buf7, primals_6, buf8, primals_4, buf9)

class DecoderNew(nn.Module):

    def __init__(self, latent_size, out_size):
        super().__init__()
        self.linear1 = nn.Linear(latent_size, int(out_size / 4))
        self.linear2 = nn.Linear(int(out_size / 4), int(out_size / 2))
        self.linear3 = nn.Linear(int(out_size / 2), out_size)
        self.relu = nn.ReLU(True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.linear3.weight
        primals_7 = self.linear3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
finloop/usad
Decoder
false
15,350
[ "BSD-3-Clause" ]
65
5e1bf326af5f1325fa4676a2de978cae6db0481c
https://github.com/finloop/usad/tree/5e1bf326af5f1325fa4676a2de978cae6db0481c
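A hedged equivalence check for this pair (editor's sketch, not part of the dataset): with the weights copied over, DecoderNew should match the eager Decoder up to float tolerance. Assumes both classes above are in scope and a CUDA device is available.

import torch

ref = Decoder(latent_size=4, out_size=4).cuda()
opt = DecoderNew(latent_size=4, out_size=4).cuda()
opt.load_state_dict(ref.state_dict())  # share the three Linear layers' weights
z = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(opt(z), ref(z), rtol=1e-4, atol=1e-5)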
BasicBlock
import torch
import torch.nn as nn
import torch.utils.data

def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
        bias=False)

def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=dilation, groups=groups, bias=False, dilation=dilation)

class BasicBlock(nn.Module):
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None,
        groups=1, base_width=64, dilation=1, norm_model='instance'):
        super(BasicBlock, self).__init__()
        if 'instance' == norm_model:
            norm_layer = nn.InstanceNorm2d
        else:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError(
                'Dilation > 1 not supported in BasicBlock')
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.stride = stride
        if stride != 1 or inplanes != planes:
            self.downsample = nn.Sequential(conv1x1(inplanes, planes,
                stride), norm_layer(planes))
        else:
            self.downsample = downsample

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'inplanes': 4, 'planes': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0,
    out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = tmp0 - tmp10
    tmp18 = 16.0
    tmp19 = tmp16 / tmp18
    tmp20 = 1e-05
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)
    tmp23 = tmp17 * tmp22
    tmp24 = tl.full([1, 1], 0, tl.int32)
    tmp25 = triton_helpers.maximum(tmp24, tmp23)
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp25, xmask)
    tl.store(out_ptr3 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)

@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(
    in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = tmp0 - tmp10
    tmp18 = 16.0
    tmp19 = tmp16 / tmp18
    tmp20 = 1e-05
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)
    tmp23 = tmp17 * tmp22
    tmp25 = tmp23 + tmp24
    tmp26 = tl.full([1, 1], 0, tl.int32)
    tmp27 = triton_helpers.maximum(tmp26, tmp25)
    tmp28 = 0.0
    tmp29 = tmp27 <= tmp28
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp27, xmask)
    tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
    tl.store(out_ptr4 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        get_raw_stream(0)
        triton_per_fused__native_batch_norm_legit_relu_0[grid(16)](buf0,
            buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
        buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1[
            grid(16)](buf6, primals_1, buf7, buf11, buf12, buf10, 16, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
    return (buf11, primals_1, primals_2, primals_3, buf0,
        reinterpret_tensor(buf4, (16,), (1,), 0), buf5, buf6,
        reinterpret_tensor(buf10, (16,), (1,), 0), buf12,
        reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0),
        reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0))

def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
        bias=False)

def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=dilation, groups=groups, bias=False, dilation=dilation)

class BasicBlockNew(nn.Module):
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None,
        groups=1, base_width=64, dilation=1, norm_model='instance'):
        super(BasicBlockNew, self).__init__()
        if 'instance' == norm_model:
            norm_layer = nn.InstanceNorm2d
        else:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError(
                'Dilation > 1 not supported in BasicBlock')
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.stride = stride
        if stride != 1 or inplanes != planes:
            self.downsample = nn.Sequential(conv1x1(inplanes, planes,
                stride), norm_layer(planes))
        else:
            self.downsample = downsample

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv2.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ferodia/MichiGAN
BasicBlock
false
15,351
[ "MIT" ]
235
a49acb49f9659d7538e62faa3ed08e46afb0ddae
https://github.com/ferodia/MichiGAN/tree/a49acb49f9659d7538e62faa3ed08e46afb0ddae
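Note that call only receives the two conv weights: with norm_model='instance', nn.InstanceNorm2d defaults to affine=False, so the norm layers carry no parameters. A corresponding shared-weight check (editor's sketch, not part of the dataset; assumes CUDA):

import torch

ref = BasicBlock(inplanes=4, planes=4).cuda()
opt = BasicBlockNew(inplanes=4, planes=4).cuda()
opt.load_state_dict(ref.state_dict())  # only conv1/conv2 weights exist
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-5)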
Attention
import math
import torch
import torch.nn.functional as F
import torch.nn as nn

class Attention(nn.Module):

    def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
        score_function='dot_product', dropout=0):
        """ Attention Mechanism
        :param embed_dim:
        :param hidden_dim:
        :param out_dim:
        :param n_head: num of head (Multi-Head Attention)
        :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot)
        :return (?, q_len, out_dim,)
        """
        super(Attention, self).__init__()
        if hidden_dim is None:
            hidden_dim = embed_dim // n_head
        if out_dim is None:
            out_dim = embed_dim
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        self.n_head = n_head
        self.score_function = score_function
        self.w_k = nn.Linear(embed_dim, n_head * hidden_dim)
        self.w_q = nn.Linear(embed_dim, n_head * hidden_dim)
        self.proj = nn.Linear(n_head * hidden_dim, out_dim)
        self.dropout = nn.Dropout(dropout)
        if score_function == 'mlp':
            self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
        elif self.score_function == 'bi_linear':
            self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
        else:
            self.register_parameter('weight', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.hidden_dim)
        if self.weight is not None:
            self.weight.data.uniform_(-stdv, stdv)

    def forward(self, k, q):
        if len(q.shape) == 2:
            q = torch.unsqueeze(q, dim=1)
        if len(k.shape) == 2:
            k = torch.unsqueeze(k, dim=1)
        mb_size = k.shape[0]
        k_len = k.shape[1]
        q_len = q.shape[1]
        kx = self.w_k(k).view(mb_size, k_len, self.n_head, self.hidden_dim)
        kx = kx.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self.
            hidden_dim)
        qx = self.w_q(q).view(mb_size, q_len, self.n_head, self.hidden_dim)
        qx = qx.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self.
            hidden_dim)
        if self.score_function == 'dot_product':
            kt = kx.permute(0, 2, 1)
            score = torch.bmm(qx, kt)
        elif self.score_function == 'scaled_dot_product':
            kt = kx.permute(0, 2, 1)
            qkt = torch.bmm(qx, kt)
            score = torch.div(qkt, math.sqrt(self.hidden_dim))
        elif self.score_function == 'mlp':
            kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1)
            qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1)
            kq = torch.cat((kxx, qxx), dim=-1)
            score = torch.tanh(torch.matmul(kq, self.weight))
        elif self.score_function == 'bi_linear':
            qw = torch.matmul(qx, self.weight)
            kt = kx.permute(0, 2, 1)
            score = torch.bmm(qw, kt)
        else:
            raise RuntimeError('invalid score_function')
        score = F.softmax(score, dim=-1)
        output = torch.bmm(score, kx)
        output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1)
        output = self.proj(output)
        output = self.dropout(output)
        return output, score

def get_inputs():
    return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4])]

def get_init_inputs():
    return [[], {'embed_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)

@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)

def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_3
        del primals_4
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_5
        del primals_6
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf4 = buf2
        del buf2
        triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf5 = buf3
        del buf3
        extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16,
            4, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del primals_8
    return (reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), buf4,
        reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), buf4,
        reinterpret_tensor(buf5, (16, 4), (4, 1), 0), primals_7,
        reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0))

class AttentionNew(nn.Module):

    def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
        score_function='dot_product', dropout=0):
        """ Attention Mechanism
        :param embed_dim:
        :param hidden_dim:
        :param out_dim:
        :param n_head: num of head (Multi-Head Attention)
        :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot)
        :return (?, q_len, out_dim,)
        """
        super(AttentionNew, self).__init__()
        if hidden_dim is None:
            hidden_dim = embed_dim // n_head
        if out_dim is None:
            out_dim = embed_dim
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        self.n_head = n_head
        self.score_function = score_function
        self.w_k = nn.Linear(embed_dim, n_head * hidden_dim)
        self.w_q = nn.Linear(embed_dim, n_head * hidden_dim)
        self.proj = nn.Linear(n_head * hidden_dim, out_dim)
        self.dropout = nn.Dropout(dropout)
        if score_function == 'mlp':
            self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
        elif self.score_function == 'bi_linear':
            self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
        else:
            self.register_parameter('weight', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.hidden_dim)
        if self.weight is not None:
            self.weight.data.uniform_(-stdv, stdv)

    def forward(self, input_0, input_1):
        primals_3 = self.w_k.weight
        primals_4 = self.w_k.bias
        primals_5 = self.w_q.weight
        primals_6 = self.w_q.bias
        primals_7 = self.proj.weight
        primals_8 = self.proj.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0], output[1]
fhamborg/NewsMTSC
Attention
false
15,352
[ "MIT" ]
46
5a8f88d7fbb921090e984cc378b02d75524c1025
https://github.com/fhamborg/NewsMTSC/tree/5a8f88d7fbb921090e984cc378b02d75524c1025
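An equivalence sketch for the attention pair (editor's addition, not part of the dataset): AttentionNew returns the same (output, score) tuple as the eager module once the weights are shared. Assumes CUDA and the dataset's 4-D input shapes.

import torch

ref = Attention(embed_dim=4).cuda()
opt = AttentionNew(embed_dim=4).cuda()
opt.load_state_dict(ref.state_dict())
k = torch.rand([4, 4, 1, 4], device='cuda')
q = torch.rand([4, 4, 1, 4], device='cuda')
(o1, s1), (o2, s2) = ref(k, q), opt(k, q)
torch.testing.assert_close(o2, o1, rtol=1e-4, atol=1e-5)
torch.testing.assert_close(s2, s1, rtol=1e-4, atol=1e-5)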
Round
import torch
import torch.utils.data
import torch.nn as nn

class Quant(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input):
        input = torch.clamp(input, 0, 255.0)
        output = input.round() * 1.0
        return output

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output

class Round(nn.Module):

    def __init__(self):
        super(Round, self).__init__()

    def forward(self, input, **kwargs):
        return Quant.apply(input * 255.0) / 255.0

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_clamp_div_mul_round_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 255.0
    tmp2 = tmp0 * tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = triton_helpers.minimum(tmp4, tmp1)
    tmp6 = libdevice.nearbyint(tmp5)
    tmp7 = 1.0
    tmp8 = tmp6 * tmp7
    tmp9 = 0.00392156862745098
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + x0, tmp10, xmask)

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_div_mul_round_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,

class Quant(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input):
        input = torch.clamp(input, 0, 255.0)
        output = input.round() * 1.0
        return output

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output

class RoundNew(nn.Module):

    def __init__(self):
        super(RoundNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
felixcheng97/IICNet
Round
false
15,353
[ "MIT" ]
50
2648d7148c01a03226128c24a285c4a52e2b5aa0
https://github.com/felixcheng97/IICNet/tree/2648d7148c01a03226128c24a285c4a52e2b5aa0
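Two things worth checking in this row: the constant 0.00392156862745098 is exactly 1/255, and Quant is a straight-through estimator, so gradients pass through the rounding unchanged. A minimal sketch (editor's addition, not part of the dataset; assumes CUDA and the Round class above in scope):

import torch

x = torch.rand([4, 4, 4, 4], device='cuda', requires_grad=True)
y = Round()(x)       # eager path with the custom backward
y.sum().backward()
# d/dx (Quant(255 * x) / 255) == 255 * 1 * (1/255) == 1 everywhere
assert torch.allclose(x.grad, torch.ones_like(x))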
PadSameConv2d
import math
import torch
import torch.nn.functional as F

class PadSameConv2d(torch.nn.Module):

    def __init__(self, kernel_size, stride=1):
        """
        Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernelsize of the convolution, int or tuple/list
        :param stride: Stride of the convolution, int or tuple/list
        """
        super().__init__()
        if isinstance(kernel_size, (tuple, list)):
            self.kernel_size_y = kernel_size[0]
            self.kernel_size_x = kernel_size[1]
        else:
            self.kernel_size_y = kernel_size
            self.kernel_size_x = kernel_size
        if isinstance(stride, (tuple, list)):
            self.stride_y = stride[0]
            self.stride_x = stride[1]
        else:
            self.stride_y = stride
            self.stride_x = stride

    def forward(self, x: 'torch.Tensor'):
        _, _, height, width = x.shape
        padding_y = (self.stride_y * (math.ceil(height / self.stride_y) -
            1) + self.kernel_size_y - height) / 2
        padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1
            ) + self.kernel_size_x - width) / 2
        padding = [math.floor(padding_x), math.ceil(padding_x), math.floor
            (padding_y), math.ceil(padding_y)]
        return F.pad(input=x, pad=padding)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'kernel_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 7 % 7
    x0 = xindex % 7
    x2 = xindex // 49
    x4 = xindex
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = -1 + x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
        other=0.0)
    tl.store(out_ptr0 + x4, tmp11, xmask)

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(784)](arg0_1, buf0, 784,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,

class PadSameConv2dNew(torch.nn.Module):

    def __init__(self, kernel_size, stride=1):
        """
        Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernelsize of the convolution, int or tuple/list
        :param stride: Stride of the convolution, int or tuple/list
        """
        super().__init__()
        if isinstance(kernel_size, (tuple, list)):
            self.kernel_size_y = kernel_size[0]
            self.kernel_size_x = kernel_size[1]
        else:
            self.kernel_size_y = kernel_size
            self.kernel_size_x = kernel_size
        if isinstance(stride, (tuple, list)):
            self.stride_y = stride[0]
            self.stride_x = stride[1]
        else:
            self.stride_y = stride
            self.stride_x = stride

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
fish258/MonoRec
PadSameConv2d
false
15,354
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
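The padded shape in the generated kernel can be reproduced by hand. For the dataset's inputs (height = width = 4, kernel_size = 4, stride = 1), the asymmetric "same" padding comes out as 1 on one side and 2 on the other, giving the 7x7 output (xnumel = 784 = 4 * 4 * 7 * 7). A worked sketch (editor's addition):

import math

height, stride, kernel = 4, 1, 4
pad = (stride * (math.ceil(height / stride) - 1) + kernel - height) / 2  # 1.5
print(math.floor(pad), math.ceil(pad))  # 1 2  ->  padded size 4 + 1 + 2 = 7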
Encoder
import torch
import torch.nn as nn

class Encoder(nn.Module):

    def __init__(self, in_size, latent_size):
        super().__init__()
        self.linear1 = nn.Linear(in_size, int(in_size / 2))
        self.linear2 = nn.Linear(int(in_size / 2), int(in_size / 4))
        self.linear3 = nn.Linear(int(in_size / 4), latent_size)
        self.relu = nn.ReLU(True)

    def forward(self, w):
        out = self.linear1(w)
        out = self.relu(out)
        out = self.linear2(out)
        out = self.relu(out)
        out = self.linear3(out)
        z = self.relu(out)
        return z

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'in_size': 4, 'latent_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 2
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr0 + x4, tmp6, xmask)

@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 2 * x1 + 8 * (x1 % 4 // 4) + 32 * ((4 *
        (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)

@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + x0, tmp5, xmask)
    tl.store(out_ptr0 + x0, tmp7, xmask)

@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x0 % 4 // 4) + 16 * ((4 * (x0 //
        4 % 4) + x0 % 4) // 16)), xmask)
    tl.store(out_ptr0 + x0, tmp0, xmask)

@triton.jit
def triton_poi_fused_relu_threshold_backward_view_4(in_out_ptr0, in_ptr0,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr1 + x4, tmp6, xmask)

def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (2, 4), (4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 2), (2, 1))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (4, 1), (1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1),
            0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
        del buf0
        buf11 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1,
            primals_2, buf11, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        triton_poi_fused_view_1[grid(128)](buf1, buf2, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf1
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (2, 1), (1,
            2), 0), out=buf3)
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf3
        buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_2[grid(64)](buf4,
            primals_5, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        triton_poi_fused_view_3[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf4
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (1, 4), (1,
            1), 0), out=buf6)
        buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf6
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_view_4[grid(256)](buf7,
            primals_7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf7
        del primals_7
    return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2,
        buf5, buf9, primals_6, buf10, primals_4, buf11)

class EncoderNew(nn.Module):

    def __init__(self, in_size, latent_size):
        super().__init__()
        self.linear1 = nn.Linear(in_size, int(in_size / 2))
        self.linear2 = nn.Linear(int(in_size / 2), int(in_size / 4))
        self.linear3 = nn.Linear(int(in_size / 4), latent_size)
        self.relu = nn.ReLU(True)

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.linear3.weight
        primals_7 = self.linear3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
finloop/usad
Encoder
false
15,355
[ "BSD-3-Clause" ]
65
5e1bf326af5f1325fa4676a2de978cae6db0481c
https://github.com/finloop/usad/tree/5e1bf326af5f1325fa4676a2de978cae6db0481c
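The same shared-weight check applies here (editor's sketch, not part of the dataset; assumes CUDA): with in_size=4 the hidden widths are 2 and 1, matching the (2, 4), (1, 2) and (4, 1) weight shapes asserted in call.

import torch

ref = Encoder(in_size=4, latent_size=4).cuda()
opt = EncoderNew(in_size=4, latent_size=4).cuda()
opt.load_state_dict(ref.state_dict())
w = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(opt(w), ref(w), rtol=1e-4, atol=1e-5)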
Offset
import torch
from torch import nn

class Offset(nn.Module):

    def __init__(self, init_value=0.0):
        super(Offset, self).__init__()
        self.bias = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, input):
        return input + self.bias

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)

def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    return buf0,

class OffsetNew(nn.Module):

    def __init__(self, init_value=0.0):
        super(OffsetNew, self).__init__()
        self.bias = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, input_0):
        primals_1 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
flipson/dd3d
Offset
false
15,356
[ "MIT" ]
227
86d8660c29612b79836dad9b6c39972ac2ca1557
https://github.com/flipson/dd3d/tree/86d8660c29612b79836dad9b6c39972ac2ca1557
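The fused kernel is just a broadcast add of the single learned scalar; a one-line check (editor's sketch, not part of the dataset; assumes CUDA):

import torch

m = OffsetNew(init_value=0.5).cuda()
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(m(x), x + 0.5)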
GlobalSumPool2d
import torch
import torch.nn as nn
import torch.utils.cpp_extension

class GlobalSumPool2d(nn.Module):

    def forward(self, x):
        return torch.sum(x, [2, 3])

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp4, xmask)

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_sum_0[grid(16)](arg0_1, buf0, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del arg0_1
    return buf0,

class GlobalSumPool2dNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
STomoya/animeface
GlobalSumPool2d
false
15,357
[ "MIT" ]
61
37b3cd26097d7874559d4c152e41e5712b7a1a42
https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
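Each Triton program here reduces the 16 spatial elements of one (batch, channel) pair, so the result should equal the eager reduction over dims [2, 3] (editor's sketch, not part of the dataset; assumes CUDA):

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(GlobalSumPool2dNew()(x), x.sum(dim=[2, 3]))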
period_L2
import torch
import numpy as np
import torch.nn as nn

def reduction_mean(loss):
    return loss.mean()

def reduction_none(loss):
    return loss

def reduction_sum(loss):
    return loss.sum()

class period_L2(nn.Module):

    def __init__(self, reduction='sum'):
        """ periodic Squared Error """
        super().__init__()
        if reduction == 'sum':
            self.reduction = reduction_sum
        elif reduction == 'mean':
            self.reduction = reduction_mean
        elif reduction == 'none':
            self.reduction = reduction_none
        else:
            raise Exception('unknown reduction')

    def forward(self, theta_pred, theta_gt):
        dt = theta_pred - theta_gt
        loss = (torch.remainder(dt - np.pi / 2, np.pi) - np.pi / 2) ** 2
        assert (loss >= 0).all()
        loss = self.reduction(loss)
        return loss

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_per_fused_pow_remainder_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = 1.5707963267948966
    tmp4 = tmp2 - tmp3
    tmp5 = 3.141592653589793
    tmp6 = tmp4 % tmp5
    tmp7 = tl.full([1], 0, tl.int32)
    tmp8 = tmp6 != tmp7
    tmp9 = libdevice.signbit(tmp6) if tmp6.dtype is tl.float32 else tmp6 < 0
    tmp10 = libdevice.signbit(tmp5) if tmp5.dtype is tl.float32 else tmp5 < 0
    tmp11 = tmp9 != tmp10
    tmp12 = tmp8 & tmp11
    tmp13 = tmp6 + tmp5
    tmp14 = tl.where(tmp12, tmp13, tmp6)
    tmp15 = tmp14 - tmp3
    tmp16 = tmp15 * tmp15
    tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
    tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_pow_remainder_sub_sum_0[grid(1)](arg0_1, arg1_1,
            buf0, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,

def reduction_mean(loss):
    return loss.mean()

def reduction_none(loss):
    return loss

def reduction_sum(loss):
    return loss.sum()

class period_L2New(nn.Module):

    def __init__(self, reduction='sum'):
        """ periodic Squared Error """
        super().__init__()
        if reduction == 'sum':
            self.reduction = reduction_sum
        elif reduction == 'mean':
            self.reduction = reduction_mean
        elif reduction == 'none':
            self.reduction = reduction_none
        else:
            raise Exception('unknown reduction')

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
flytocc/RAPiD
period_L2
false
15,358
[ "MIT" ]
142
92e6a44b8a0107def055e93c971d78fd548562f8
https://github.com/flytocc/RAPiD/tree/92e6a44b8a0107def055e93c971d78fd548562f8
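The remainder branch in the kernel reproduces Python's sign convention for %, which is what makes the loss pi-periodic: a prediction off by exactly one period should wrap to zero loss. A sketch (editor's addition, not part of the dataset; assumes CUDA):

import numpy as np
import torch

crit = period_L2New()                       # reduction='sum'
a = torch.zeros([4, 4, 4, 4], device='cuda')
b = torch.full_like(a, np.pi)               # off by exactly one period
assert crit(a, b).item() < 1e-6             # remainder(-3*pi/2, pi) - pi/2 == 0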
ConvReLU2
import math
import torch
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import LeakyReLU

class PadSameConv2d(torch.nn.Module):

    def __init__(self, kernel_size, stride=1):
        """
        Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernelsize of the convolution, int or tuple/list
        :param stride: Stride of the convolution, int or tuple/list
        """
        super().__init__()
        if isinstance(kernel_size, (tuple, list)):
            self.kernel_size_y = kernel_size[0]
            self.kernel_size_x = kernel_size[1]
        else:
            self.kernel_size_y = kernel_size
            self.kernel_size_x = kernel_size
        if isinstance(stride, (tuple, list)):
            self.stride_y = stride[0]
            self.stride_x = stride[1]
        else:
            self.stride_y = stride
            self.stride_x = stride

    def forward(self, x: 'torch.Tensor'):
        _, _, height, width = x.shape
        padding_y = (self.stride_y * (math.ceil(height / self.stride_y) -
            1) + self.kernel_size_y - height) / 2
        padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1
            ) + self.kernel_size_x - width) / 2
        padding = [math.floor(padding_x), math.ceil(padding_x), math.floor
            (padding_y), math.ceil(padding_y)]
        return F.pad(input=x, pad=padding)

class ConvReLU2(torch.nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        leaky_relu_neg_slope=0.1):
        """
        Performs two convolutions and a leaky relu. The first operation
        only convolves in y direction, the second one only in x direction.
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction
        :param stride: Stride for the convolutions, first in y direction, then in x direction
        """
        super().__init__()
        self.pad_0 = PadSameConv2d(kernel_size=(kernel_size, 1), stride=(
            stride, 1))
        self.conv_y = Conv2d(in_channels=in_channels, out_channels=
            out_channels, kernel_size=(kernel_size, 1), stride=(stride, 1))
        self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope)
        self.pad_1 = PadSameConv2d(kernel_size=(1, kernel_size), stride=(1,
            stride))
        self.conv_x = Conv2d(in_channels=out_channels, out_channels=
            out_channels, kernel_size=(1, kernel_size), stride=(1, stride))

    def forward(self, x: 'torch.Tensor'):
        t = self.pad_0(x)
        t = self.conv_y(t)
        t = self.leaky_relu(t)
        t = self.pad_1(t)
        t = self.conv_x(t)
        return self.leaky_relu(t)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import LeakyReLU
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 448
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 7
    x2 = xindex // 28
    x3 = xindex % 28
    x4 = xindex
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0)
    tl.store(out_ptr0 + x4, tmp6, xmask)

@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + x3, tmp4, xmask)

@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 448
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 7
    x4 = xindex // 7
    x2 = xindex // 28 % 4
    x5 = xindex
    tmp0 = -1 + x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0
        ).to(tl.int1)
    tmp7 = tl.load(in_ptr1 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0)
    tmp8 = tl.load(in_ptr2 + x2, tmp5 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp9 = tmp7 + tmp8
    tmp10 = 0.1
    tmp11 = tmp9 * tmp10
    tmp12 = tl.where(tmp6, tmp9, tmp11)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp5, tmp12, tmp13)
    tl.store(out_ptr0 + x5, tmp14, xmask)

@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 1, 4), (16, 4, 4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 7, 4), (112, 28, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(448)](primals_1, buf0,
            448, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
            primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 7), (112, 28, 7, 1), torch.float32)
        triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2[grid(448)](
            buf2, buf1, primals_3, buf3, 448, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_3
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf6 = buf1
        del buf1
        triton_poi_fused_convolution_leaky_relu_3[grid(256)](buf4,
            primals_5, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf4
        del primals_5
    return buf6, primals_2, primals_4, buf0, buf2, buf3, buf5

class PadSameConv2d(torch.nn.Module):

    def __init__(self, kernel_size, stride=1):
        """
        Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernelsize of the convolution, int or tuple/list
        :param stride: Stride of the convolution, int or tuple/list
        """
        super().__init__()
        if isinstance(kernel_size, (tuple, list)):
            self.kernel_size_y = kernel_size[0]
            self.kernel_size_x = kernel_size[1]
        else:
            self.kernel_size_y = kernel_size
            self.kernel_size_x = kernel_size
        if isinstance(stride, (tuple, list)):
            self.stride_y = stride[0]
            self.stride_x = stride[1]
        else:
            self.stride_y = stride
            self.stride_x = stride

    def forward(self, x: 'torch.Tensor'):
        _, _, height, width = x.shape
        padding_y = (self.stride_y * (math.ceil(height / self.stride_y) -
            1) + self.kernel_size_y - height) / 2
        padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1
            ) + self.kernel_size_x - width) / 2
        padding = [math.floor(padding_x), math.ceil(padding_x), math.floor
            (padding_y), math.ceil(padding_y)]
        return F.pad(input=x, pad=padding)

class ConvReLU2New(torch.nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        leaky_relu_neg_slope=0.1):
        """
        Performs two convolutions and a leaky relu. The first operation
        only convolves in y direction, the second one only in x direction.
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction
        :param stride: Stride for the convolutions, first in y direction, then in x direction
        """
        super().__init__()
        self.pad_0 = PadSameConv2d(kernel_size=(kernel_size, 1), stride=(
            stride, 1))
        self.conv_y = Conv2d(in_channels=in_channels, out_channels=
            out_channels, kernel_size=(kernel_size, 1), stride=(stride, 1))
        self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope)
        self.pad_1 = PadSameConv2d(kernel_size=(1, kernel_size), stride=(1,
            stride))
        self.conv_x = Conv2d(in_channels=out_channels, out_channels=
            out_channels, kernel_size=(1, kernel_size), stride=(1, stride))

    def forward(self, input_0):
        primals_2 = self.conv_y.weight
        primals_3 = self.conv_y.bias
        primals_4 = self.conv_x.weight
        primals_5 = self.conv_x.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
fish258/MonoRec
ConvReLU2
false
15,359
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
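Shared-weight equivalence sketch for the separable-conv pair (editor's addition, not part of the dataset; assumes CUDA):

import torch

ref = ConvReLU2(in_channels=4, out_channels=4, kernel_size=4).cuda()
opt = ConvReLU2New(in_channels=4, out_channels=4, kernel_size=4).cuda()
opt.load_state_dict(ref.state_dict())  # conv_y and conv_x weights and biases
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-5)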
ChannelSELayer
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *

class ChannelSELayer(nn.Module):
    """
    Copied from https://github.com/ai-med/squeeze_and_excitation/blob/master/squeeze_and_excitation/squeeze_and_excitation.py

    Re-implementation of Squeeze-and-Excitation (SE) block described in:
        *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*

    MIT License

    Copyright (c) 2018 Abhijit Guha Roy

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    """

    def __init__(self, num_channels, reduction_ratio=2, dim_out=None, stride=1):
        """
        :param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
        """
        super(ChannelSELayer, self).__init__()
        if dim_out is not None:
            assert dim_out == num_channels, (dim_out, num_channels,
                'only same dimensionality is supported')
        num_channels_reduced = num_channels // reduction_ratio
        self.reduction_ratio = reduction_ratio
        self.stride = stride
        self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
        self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Hardswish()

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H, W)
        :return: output tensor
        """
        batch_size, num_channels, _H, _W = input_tensor.size()
        squeeze_tensor = input_tensor.reshape(batch_size, num_channels, -1
            ).mean(dim=2)
        fc_out_1 = self.relu(self.fc1(squeeze_tensor))
        fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
        a, b = squeeze_tensor.size()
        output_tensor = torch.mul(input_tensor, fc_out_2.view(a, b, 1, 1))
        if self.stride > 1:
            output_tensor = output_tensor[:, :, ::self.stride, ::self.stride]
        return output_tensor

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'num_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)

@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)

@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = 3.0
    tmp3 = tmp1 + tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = 6.0
    tmp7 = triton_helpers.minimum(tmp5, tmp6)
    tmp8 = tmp1 * tmp7
    tmp9 = 0.16666666666666666
    tmp10 = tmp8 * tmp9
    tmp11 = tmp0 * tmp10
    tl.store(out_ptr0 + x2, tmp11, xmask)

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2, 4), (4, 1))
    assert_size_stride(primals_3, (2,), (1,))
    assert_size_stride(primals_4, (4, 2), (2, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
        buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_2, (4, 2), (1,
            4), 0), out=buf2)
        del primals_2
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(8)](buf3, primals_3, 8, XBLOCK=8,
            num_warps=1, num_stages=1)
        del primals_3
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4,
            (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4)
        del primals_5
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return buf5, primals_1, buf1, buf3, buf4, primals_4

class ChannelSELayerNew(nn.Module):
    """
    Copied from https://github.com/ai-med/squeeze_and_excitation/blob/master/squeeze_and_excitation/squeeze_and_excitation.py

    Re-implementation of Squeeze-and-Excitation (SE) block described in:
        *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*

    MIT License

    Copyright (c) 2018 Abhijit Guha Roy

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    """

    def __init__(self, num_channels, reduction_ratio=2, dim_out=None, stride=1):
        """
        :param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
        """
        super(ChannelSELayerNew, self).__init__()
        if dim_out is not None:
            assert dim_out == num_channels, (dim_out, num_channels,
                'only same dimensionality is supported')
        num_channels_reduced = num_channels // reduction_ratio
        self.reduction_ratio = reduction_ratio
        self.stride = stride
        self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
        self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Hardswish()

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
evdcush/ppuda
ChannelSELayer
false
15360
[ "MIT" ]
262
22783ac92207da6730ee618c953af230c5c39f28
https://github.com/evdcush/ppuda/tree/22783ac92207da6730ee618c953af230c5c39f28
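A note on the ChannelSELayer record above: the fused kernels implement squeeze (per-channel spatial mean), the two linear layers, and a Hardswish gate, the `g * relu6(g + 3) / 6` pattern in triton_poi_fused_mul_2, where 0.16666666666666666 is 1/6. A minimal eager sketch of the same arithmetic, self-contained on CPU; the shapes follow get_inputs and the tolerance is an assumption, not part of the record:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)                        # (B, C, H, W), as in get_inputs
fc1, fc2 = nn.Linear(4, 2), nn.Linear(2, 4)       # reduction_ratio=2

g = x.mean(dim=(2, 3))                            # squeeze: triton_per_fused_mean_0
g = torch.relu(fc1(g))                            # triton_poi_fused_relu_1
g = fc2(g)                                        # extern_kernels.addmm
gate = g * torch.clamp(g + 3.0, 0.0, 6.0) / 6.0   # Hardswish: triton_poi_fused_mul_2
out = x * gate[:, :, None, None]                  # excite: broadcast over H, W

assert torch.allclose(out, x * nn.Hardswish()(g)[:, :, None, None], atol=1e-6)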
Upconv
import math import torch import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import Upsample class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. :param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class Upconv(torch.nn.Module): def __init__(self, in_channels, out_channels): """ Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one only in x direction. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction :param stride: Stride for the convolutions, first in y direction, then in x direction """ super().__init__() self.upsample = Upsample(scale_factor=2) self.pad = PadSameConv2d(kernel_size=2) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=2, stride=1) def forward(self, x: 'torch.Tensor'): t = self.upsample(x) t = self.pad(t) return self.conv(t) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import Upsample assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 9 % 9 x0 = xindex % 9 x2 = xindex // 81 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 8, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tmp0.to(tl.float32) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8.to(tl.int32) tmp10 = tmp3.to(tl.float32) tmp11 = tmp10 * tmp7 tmp12 = tmp11.to(tl.int32) tmp13 = tl.load(in_ptr0 + (tmp12 + 4 * tmp9 + 16 * x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_constant_pad_nd_0[grid(1296)](primals_1, buf0, 1296, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. 
:param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class UpconvNew(torch.nn.Module): def __init__(self, in_channels, out_channels): """ Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one only in x direction. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction :param stride: Stride for the convolutions, first in y direction, then in x direction """ super().__init__() self.upsample = Upsample(scale_factor=2) self.pad = PadSameConv2d(kernel_size=2) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=2, stride=1) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
fish258/MonoRec
Upconv
false
15361
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
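On the Upconv record above: the docstring is inherited from a sibling two-conv block; the module itself is nearest-neighbour 2x upsample, then "same" padding, then a single 2x2 conv. The buffer shapes make the padding arithmetic concrete: a 4x4 input upsamples to 8x8, is padded to 9x9 (xnumel 1296 = 4*4*9*9, pad on the bottom/right only), and the 2x2 conv returns it to 8x8 (xnumel 1024 = 4*4*8*8). A small worked sketch of that arithmetic (the values are illustrative):

import math

def same_pad(size, k, s):
    # padding so that conv(k, s) keeps ceil(size / s) outputs (PadSameConv2d)
    p = (s * (math.ceil(size / s) - 1) + k - size) / 2
    return math.floor(p), math.ceil(p)

# Upconv on a 4x4 input: upsample doubles it to 8, then kernel_size=2, stride=1.
assert same_pad(8, k=2, s=1) == (0, 1)   # pad 8 -> 9, right/bottom only
assert 9 - 2 + 1 == 8                    # the 2x2 conv brings it back to 8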
OrthogonalFusion
import torch import torch.nn as nn class OrthogonalFusion(nn.Module): def __init__(self): super().__init__() def forward(self, local_feat, global_feat): global_feat_norm = torch.norm(global_feat, p=2, dim=1) projection = torch.bmm(global_feat.unsqueeze(1), torch.flatten( local_feat, start_dim=2)) projection = torch.bmm(global_feat.unsqueeze(2), projection).view( local_feat.size()) projection = projection / (global_feat_norm * global_feat_norm).view( -1, 1, 1, 1) orthogonal_comp = local_feat - projection global_feat = global_feat.unsqueeze(-1).unsqueeze(-1) return torch.cat([global_feat.expand(orthogonal_comp.size()), orthogonal_comp], dim=1) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x2 = xindex // 128 x0 = xindex % 16 x4 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + 4 * x2, tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp11 * tmp11 tmp13 = tl.load(in_ptr0 + (1 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tl.load(in_ptr0 + (2 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tl.load(in_ptr0 + (3 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = libdevice.sqrt(tmp21) tmp23 = tmp22 * tmp22 tmp24 = tmp10 / tmp23 tmp25 = tmp9 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp6, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp5, tmp27) tl.store(out_ptr0 + x4, tmp28, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 1, 4), (4, 4, 1), 0), arg1_1, out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 0), buf0, out=buf1) del buf0 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf1, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf1 return buf2, class OrthogonalFusionNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
flrngel/DOLG-pytorch
OrthogonalFusion
false
15362
[ "MIT" ]
56
97732d2932ef6733f17cf8ac1aee990effe6fd64
https://github.com/flrngel/DOLG-pytorch/tree/97732d2932ef6733f17cf8ac1aee990effe6fd64
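On the OrthogonalFusion record above: the cat kernel inlines the projection proj = g (g^T L) / ||g||^2, rebuilding ||g||^2 as sqrt(sum g^2)^2 in tmp12..tmp23. Its (4, 8, 4, 4) output comes from a broadcasting quirk: get_inputs passes a 3D local tensor, so dividing the (4, 4, 4) projection by the (4, 1, 1, 1) norm broadcasts everything up to 4D. With the 4D (B, C, H, W) local features the module is written for, the residual half of the concat is orthogonal to the global vector along channels; a self-contained check (dims and tolerance are illustrative assumptions):

import torch

B, C, H, W = 2, 8, 3, 3
L = torch.rand(B, C, H, W)                               # local features
g = torch.rand(B, C)                                     # global descriptor

coef = torch.bmm(g.unsqueeze(1), L.flatten(2))           # (B, 1, H*W): g^T L
proj = torch.bmm(g.unsqueeze(2), coef).view_as(L)        # rank-1 projection onto g
proj = proj / (g.norm(dim=1) ** 2).view(-1, 1, 1, 1)
orth = L - proj

# <g, L - proj> over the channel axis vanishes at every spatial location.
inner = (g[:, :, None, None] * orth).sum(dim=1)
assert inner.abs().max() < 1e-5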
compute_g_spa
import torch import torch.nn as nn class cnn1x1(nn.Module): def __init__(self, dim1=3, dim2=3, bias=True): super(cnn1x1, self).__init__() self.cnn = nn.Conv2d(dim1, dim2, kernel_size=1, bias=bias) def forward(self, x): x = self.cnn(x) return x class compute_g_spa(nn.Module): def __init__(self, dim1=64 * 3, dim2=64 * 3, bias=False): super(compute_g_spa, self).__init__() self.dim1 = dim1 self.dim2 = dim2 self.g1 = cnn1x1(self.dim1, self.dim2, bias=bias) self.g2 = cnn1x1(self.dim1, self.dim2, bias=bias) self.softmax = nn.Softmax(dim=-1) def forward(self, x1): g1 = self.g1(x1).permute(0, 3, 2, 1).contiguous() g2 = self.g2(x1).permute(0, 3, 1, 2).contiguous() g3 = g1.matmul(g2) g = self.softmax(g3) return g def get_inputs(): return [torch.rand([4, 192, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 768 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 192 y1 = yindex // 192 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 192 * x2 + 786432 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 192 x1 = xindex // 192 % 64 x2 = xindex // 12288 % 64 x3 = xindex // 786432 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 192 * x2 + 12288 * x1 + 786432 * x3), None) tl.store(out_ptr0 + x4, tmp0, None) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 12288 y1 = yindex // 12288 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12288 * x2 + 786432 * y1), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 64 * y3), tmp0, xmask) @triton.jit def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + (r1 + 64 * x0), tmp9, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (192, 192, 1, 1), (192, 1, 1, 1)) assert_size_stride(primals_2, (4, 192, 64, 64), (786432, 4096, 64, 1)) assert_size_stride(primals_3, (192, 192, 1, 1), (192, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 192, 64, 64), (786432, 1, 12288, 192), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(768, 4096)](primals_2, buf0, 768, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf1, (4, 192, 64, 64), (786432, 1, 12288, 192)) buf2 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 192, 64, 64), (786432, 1, 12288, 192)) buf3 = empty_strided_cuda((4, 64, 64, 192), (786432, 12288, 192, 1), torch.float32) triton_poi_fused_clone_1[grid(3145728)](buf1, buf3, 3145728, XBLOCK =512, num_warps=8, num_stages=1) buf4 = reinterpret_tensor(buf1, (4, 64, 192, 64), (786432, 12288, 64, 1), 0) del buf1 triton_poi_fused_clone_2[grid(49152, 64)](buf2, buf4, 49152, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf2 buf5 = empty_strided_cuda((256, 64, 64), (4096, 64, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (256, 64, 192), (12288, 192, 1), 0), reinterpret_tensor(buf4, (256, 192, 64), (12288, 64, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_per_fused__softmax_3[grid(16384)](buf5, buf8, 16384, 64, XBLOCK=8, num_warps=4, num_stages=1) del buf5 return buf8, primals_1, buf0, primals_3, buf8, reinterpret_tensor(buf3, (256, 192, 64), (12288, 1, 192), 0), reinterpret_tensor(buf4, (256, 64, 192), (12288, 1, 64), 0) class cnn1x1(nn.Module): def __init__(self, dim1=3, dim2=3, bias=True): super(cnn1x1, self).__init__() self.cnn = nn.Conv2d(dim1, dim2, kernel_size=1, bias=bias) def forward(self, x): x = self.cnn(x) return x class compute_g_spaNew(nn.Module): def __init__(self, dim1=64 * 3, dim2=64 * 3, bias=False): super(compute_g_spaNew, self).__init__() self.dim1 = dim1 self.dim2 = dim2 self.g1 = cnn1x1(self.dim1, self.dim2, bias=bias) self.g2 = cnn1x1(self.dim1, self.dim2, bias=bias) self.softmax = nn.Softmax(dim=-1) def forward(self, input_0): primals_1 = self.g1.cnn.weight primals_3 = self.g2.cnn.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
fabro66/Online-Skeleton-based-Action-Recognition
compute_g_spa
false
15363
[ "MIT" ]
63
de00cbf17ceea98a7d07f68bbbd966bfd02d3b40
https://github.com/fabro66/Online-Skeleton-based-Action-Recognition/tree/de00cbf17ceea98a7d07f68bbbd966bfd02d3b40
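On the compute_g_spa record above: the 1x1 convs are per-position linear maps, so the lowering moves the input to channels-last (triton_poi_fused_0), runs both convs through extern_kernels.convolution, and turns the matmul into 256 = 4*64 batched (64, 192) x (192, 64) products followed by a row-wise softmax; the result is a softmax affinity matrix between the 64 positions of dim 2, one per (batch, dim-3 position). A reduced-size eager restatement (the small dims are illustrative; the record runs at (4, 192, 64, 64)):

import torch
import torch.nn as nn

B, C, T, V = 2, 6, 5, 5                    # reduced dims, illustrative only
x = torch.rand(B, C, T, V)
g1 = nn.Conv2d(C, C, kernel_size=1, bias=False)
g2 = nn.Conv2d(C, C, kernel_size=1, bias=False)

a = g1(x).permute(0, 3, 2, 1)              # (B, V, T, C)
b = g2(x).permute(0, 3, 1, 2)              # (B, V, C, T)
g = torch.softmax(a.matmul(b), dim=-1)     # (B, V, T, T) affinity over dim-2

assert torch.allclose(g.sum(dim=-1), torch.ones(B, V, T))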
CompositeActivation
import torch class CompositeActivation(torch.nn.Module): def forward(self, x): x = torch.atan(x) return torch.cat([x / 0.67, x * x / 0.6], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = libdevice.atan(tmp5) tmp7 = 1.4925373134328357 tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp11 & xmask, other=0.0) tmp15 = libdevice.atan(tmp14) tmp16 = tmp15 * tmp15 tmp17 = 1.6666666666666667 tmp18 = tmp16 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp11, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp10, tmp20) tl.store(out_ptr0 + x3, tmp21, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class CompositeActivationNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
fuzhanrahmanian/lucent
CompositeActivation
false
15364
[ "Apache-2.0" ]
449
13b24c3c37784185275da73c7a11095b2ae809c5
https://github.com/fuzhanrahmanian/lucent/tree/13b24c3c37784185275da73c7a11095b2ae809c5
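On the CompositeActivation record above: the fused cat kernel folds the two divisions into reciprocal multiplies, 1.4925373134328357 = 1/0.67 on atan(x) and 1.6666666666666667 = 1/0.6 on atan(x)^2, and writes both halves of the channel-doubled output in one pass. A tiny numerical check (the tolerance is an assumption):

import torch

x = torch.rand(4, 4, 4, 4)
out = torch.cat([torch.atan(x) / 0.67, torch.atan(x) ** 2 / 0.6], dim=1)

# The kernel's multiply-by-reciprocal form gives the same values here.
alt = torch.cat([torch.atan(x) * (1 / 0.67), torch.atan(x) ** 2 * (1 / 0.6)], dim=1)
assert out.shape == (4, 8, 4, 4)
assert torch.allclose(out, alt, atol=1e-6)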
AddAndNorm
import torch import torch.nn as nn class AddAndNorm(nn.Module): def __init__(self, d_model): super(AddAndNorm, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, x, residual): return self.layer_norm(x + residual) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_native_layer_norm_0[grid(64)](primals_1, primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(256)](primals_1, primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 del primals_3 del primals_4 return buf3, buf2 class AddAndNormNew(nn.Module): def __init__(self, d_model): super(AddAndNormNew, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, input_0, input_1): primals_3 = self.layer_norm.weight primals_4 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
francismontalbo/attention-is-all-you-need-paper
AddAndNorm
false
15365
[ "MIT" ]
167
21ba3e48917da0c6808126d183bece6a9969cfd2
https://github.com/francismontalbo/attention-is-all-you-need-paper/tree/21ba3e48917da0c6808126d183bece6a9969cfd2
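On the AddAndNorm record above: the lowering uses the standard two-kernel LayerNorm split. Kernel 0 computes per-row mean and biased variance over the 4-wide normalized axis; kernel 1 re-reads the inputs and applies (v - mu) * rsqrt(var + 1e-05) * gamma + beta. The same arithmetic in eager form (eps matches the kernel; shapes follow get_inputs):

import torch
import torch.nn.functional as F

x, res = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
gamma, beta = torch.ones(4), torch.zeros(4)

s = x + res
mu = s.mean(dim=-1, keepdim=True)                         # kernel 0: row mean
var = s.var(dim=-1, unbiased=False, keepdim=True)         # kernel 0: biased variance
out = (s - mu) * torch.rsqrt(var + 1e-05) * gamma + beta  # kernel 1

assert torch.allclose(out, F.layer_norm(s, (4,), gamma, beta, eps=1e-05), atol=1e-6)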
ConvSig
import math import torch import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import Sigmoid class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. :param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvSig(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1): """ Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one only in x direction. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction :param stride: Stride for the convolutions, first in y direction, then in x direction """ super().__init__() self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride) self.sig = Sigmoid() def forward(self, x: 'torch.Tensor'): t = self.pad(x) t = self.conv(t) return self.sig(t) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import Sigmoid assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_sigmoid_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. 
:param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvSigNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1): """ Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one only in x direction. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction :param stride: Stride for the convolutions, first in y direction, then in x direction """ super().__init__() self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride) self.sig = Sigmoid() def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
fish258/MonoRec
ConvSig
false
15366
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
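On the ConvSig record above (its docstring is the same inherited two-conv text; the module is pad, one conv, then sigmoid): here PadSameConv2d produces an asymmetric pad. For kernel_size=4, stride=1 on a 4-wide input, p = (3 + 4 - 4)/2 = 1.5, so 1 on the top/left and 2 on the bottom/right, exactly the 7x7 buffer the pad kernel writes (xnumel 784 = 4*4*7*7, with the `-1 + x1` offset supplying the single leading pad), and 7 - 4 + 1 = 4 restores the input size:

import math

size, k, s = 4, 4, 1
p = (s * (math.ceil(size / s) - 1) + k - size) / 2   # 1.5 for these values
pad = (math.floor(p), math.ceil(p))
assert pad == (1, 2)                                 # asymmetric "same" padding
assert (size + sum(pad)) - k + 1 == size             # 7 - 4 + 1 == 4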
SqueezeEmbedding
import torch import torch.nn as nn class SqueezeEmbedding(nn.Module): """ Squeeze sequence embedding length to the longest one in the batch """ def __init__(self, batch_first=True): super(SqueezeEmbedding, self).__init__() self.batch_first = batch_first def forward(self, x, x_len): """ sequence -> sort -> pad and pack -> unpack ->unsort :param x: sequence embedding vectors :param x_len: numpy/tensor list :return: """ """sort""" x_sort_idx = torch.sort(-x_len)[1].long() x_unsort_idx = torch.sort(x_sort_idx)[1].long() x_len = x_len[x_sort_idx] x = x[x_sort_idx] """pack""" x_emb_p = torch.nn.utils.rnn.pack_padded_sequence(x, x_len.cpu(), batch_first=self.batch_first) """unpack: out""" out = torch.nn.utils.rnn.pad_packed_sequence(x_emb_p, batch_first= self.batch_first) out = out[0] """unsort""" out = out[x_unsort_idx] return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_index_neg_sort_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = -tmp0 tmp2 = r0 tmp3 = tmp2.to(tl.int16) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) _tmp6, tmp7 = triton_helpers.sort_with_index(tmp4, tmp5, None, 1, stable=False, descending=False) tmp8 = tmp7.to(tl.int64) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) _tmp10, tmp11 = triton_helpers.sort_with_index(tmp9, tmp5, None, 1, stable=False, descending=False) tmp12 = tmp11.to(tl.int64) tmp13 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp14 = tmp8 + tmp13 tmp15 = tmp8 < 0 tmp16 = tl.where(tmp15, tmp14, tmp8) tl.device_assert((0 <= tmp16) & (tmp16 < 4), 'index out of bounds: 0 <= tmp16 < 4') tmp18 = tl.load(in_ptr0 + tmp16, None, eviction_policy='evict_last') tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None) tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None) @triton.jit def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask, 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (x0 + 64 * tmp5), xmask) tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4,), (1,)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4,), (1,), torch.int16) buf4 = empty_strided_cuda((4,), (1,), torch.int64) buf6 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_per_fused_index_neg_sort_0[grid(1)](arg0_1, buf1, buf4, buf6, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_index_1[grid(256)](buf1, arg1_1, buf5, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg1_1 del buf1 buf7 = empty_strided_cpu((4,), (1,), torch.int64) buf7.copy_(buf6) return buf5, buf7, buf4 class SqueezeEmbeddingNew(nn.Module): """ Squeeze sequence embedding length to the longest one in the batch """ def __init__(self, batch_first=True): super(SqueezeEmbeddingNew, self).__init__() self.batch_first = batch_first def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
froth-synthesio/PyABSA
SqueezeEmbedding
false
15,367
[ "MIT" ]
199
61406e7a49f93f6c986dfd7e583d730b69c2861c
https://github.com/froth-synthesio/PyABSA/tree/61406e7a49f93f6c986dfd7e583d730b69c2861c
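On the SqueezeEmbedding record above: the sort kernel encodes the argsort-of-argsort trick. Sorting -x_len gives the descending-length order pack_padded_sequence requires, and argsorting that index vector gives the inverse permutation used to unsort the output. The all-ones lengths in get_inputs make the round trip a no-op, so a more telling eager example uses ragged lengths (values are illustrative):

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

x = torch.rand(4, 5, 8)                      # (batch, max_len, emb)
x_len = torch.tensor([2, 4, 1, 3])

sort_idx = torch.sort(-x_len)[1]             # descending-length order for packing
unsort_idx = torch.sort(sort_idx)[1]         # argsort of argsort = inverse perm
assert torch.equal(sort_idx[unsort_idx], torch.arange(4))

packed = pack_padded_sequence(x[sort_idx], x_len[sort_idx].cpu(), batch_first=True)
out, _ = pad_packed_sequence(packed, batch_first=True)
out = out[unsort_idx]                        # restore the original batch order
assert out.shape == (4, 4, 8)                # squeezed from 5 to max(x_len) == 4
assert torch.equal(out[0, :2], x[0, :2])     # valid timesteps survive the round trip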
period_L1
import torch import numpy as np import torch.nn as nn class period_L1(nn.Module): def __init__(self, reduction='sum'): """ periodic Squared Error """ super().__init__() self.reduction = reduction def forward(self, theta_pred, theta_gt): dt = theta_pred - theta_gt dt = torch.abs(torch.remainder(dt - np.pi / 2, np.pi) - np.pi / 2) assert (dt >= 0).all() if self.reduction == 'sum': loss = dt.sum() elif self.reduction == 'mean': loss = dt.mean() elif self.reduction == 'none': loss = dt return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_remainder_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = 1.5707963267948966 tmp4 = tmp2 - tmp3 tmp5 = 3.141592653589793 tmp6 = tmp4 % tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = tmp6 != tmp7 tmp9 = libdevice.signbit(tmp6) if tmp6.dtype is tl.float32 else tmp6 < 0 tmp10 = libdevice.signbit(tmp5) if tmp5.dtype is tl.float32 else tmp5 < 0 tmp11 = tmp9 != tmp10 tmp12 = tmp8 & tmp11 tmp13 = tmp6 + tmp5 tmp14 = tl.where(tmp12, tmp13, tmp6) tmp15 = tmp14 - tmp3 tmp16 = tl_math.abs(tmp15) tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_abs_remainder_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class period_L1New(nn.Module): def __init__(self, reduction='sum'): """ periodic Squared Error """ super().__init__() self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
flytocc/RAPiD
period_L1
false
15,368
[ "MIT" ]
142
92e6a44b8a0107def055e93c971d78fd548562f8
https://github.com/flytocc/RAPiD/tree/92e6a44b8a0107def055e93c971d78fd548562f8
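On the period_L1 record above: the loss wraps the angular error into [-pi/2, pi/2) via remainder(dt - pi/2, pi) - pi/2, and the kernel's signbit branch (tmp8..tmp14) exists because Triton's % is a truncated mod; a correction term is added whenever the remainder and the divisor have different signs, reproducing Python's floored-mod semantics where the result takes the divisor's sign. Worked numbers in plain Python:

import math

def wrapped_err(dt):
    # remainder(dt - pi/2, pi) lies in [0, pi); shifting recentres to [-pi/2, pi/2)
    return abs((dt - math.pi / 2) % math.pi - math.pi / 2)

assert wrapped_err(math.pi) < 1e-9                         # an error of pi wraps to 0
assert abs(wrapped_err(0.3) - 0.3) < 1e-9                  # small errors pass through
assert abs(wrapped_err(math.pi / 2 + 0.1) - (math.pi / 2 - 0.1)) < 1e-9  # folds back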
ConvReLU
import math import torch import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import LeakyReLU class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. :param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvReLU(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, leaky_relu_neg_slope=0.1): """ Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one only in x direction. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction :param stride: Stride for the convolutions, first in y direction, then in x direction """ super().__init__() self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride) self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope) def forward(self, x: 'torch.Tensor'): t = self.pad(x) t = self.conv(t) return self.leaky_relu(t) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import LeakyReLU assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1, primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 return buf3, primals_2, buf0, buf2 class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. 
:param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvReLUNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, leaky_relu_neg_slope=0.1): """ Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one only in x direction. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction :param stride: Stride for the convolutions, first in y direction, then in x direction """ super().__init__() self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride) self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
fish258/MonoRec
ConvReLU
false
15369
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
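On the ConvReLU record above (same inherited docstring; the module is pad, one conv, then LeakyReLU): the fused epilogue writes two buffers. buf3 holds LeakyReLU(0.1) of conv + bias, and buf2 holds the boolean `x > 0` mask that is returned so the backward pass can reuse it instead of recomputing the comparison. The elementwise rule, in eager form:

import torch
import torch.nn.functional as F

y = torch.randn(4, 4, 4, 4)                 # stands in for conv output + bias
mask = y > 0                                # buf2: saved for the backward pass
out = torch.where(mask, y, 0.1 * y)         # buf3: LeakyReLU with slope 0.1

assert torch.allclose(out, F.leaky_relu(y, negative_slope=0.1))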
Block
import torch import torch.nn as nn class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale if mask is not None: mask = mask.bool() attn = attn.masked_fill(~mask[:, None, None, :], float('-inf')) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x, attn class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, mask=None): _x, attn = self.attn(self.norm1(x), mask=mask) x = x + self.drop_path(_x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x, attn def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = 
tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16,), (1,)) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, 
num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12, primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_8 buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0) del buf7 extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12, primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return buf19, buf8, primals_3, primals_6, primals_7, reinterpret_tensor( buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0 ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4 class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale if mask is not None: mask = mask.bool() attn = attn.masked_fill(~mask[:, None, None, :], float('-inf')) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x, attn class BlockNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = 
norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, input_0): primals_1 = self.norm1.weight primals_2 = self.norm1.bias primals_4 = self.attn.qkv.weight primals_5 = self.attn.proj.weight primals_6 = self.attn.proj.bias primals_7 = self.norm2.weight primals_8 = self.norm2.bias primals_9 = self.mlp.fc1.weight primals_10 = self.mlp.fc1.bias primals_11 = self.mlp.fc2.weight primals_12 = self.mlp.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
fiveflowers/ViLT
Block
false
15370
[ "Apache-2.0" ]
587
762fd3975c180db6fc88f577cf39549983fa373a
https://github.com/fiveflowers/ViLT/tree/762fd3975c180db6fc88f577cf39549983fa373a
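A quick smoke-test sketch for this row (added, not part of the source data): it assumes the row's Python has been executed and a CUDA device is available, since call() pins device 0. dim=4 and num_heads=4 are inferred from the asserted (12, 4) qkv weight and the kernel grids above, not stated in the row itself.

import torch

# Hedged sketch: exercising the compiled BlockNew; parameters are inferred assumptions.
block = BlockNew(dim=4, num_heads=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')  # matches the asserted primals_3 shape (batch, tokens, dim)
out, attn = block(x)
print(out.shape, attn.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4, 4])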
ATLoss
import torch
import torch.nn as nn


def multilabel_categorical_crossentropy(y_pred, y_true):
    y_pred = (1 - 2 * y_true) * y_pred
    y_pred_neg = y_pred - y_true * 1000000000000.0
    y_pred_pos = y_pred - (1 - y_true) * 1000000000000.0
    zeros = torch.zeros_like(y_pred[..., :1])
    y_pred_neg = torch.cat([y_pred_neg, zeros], dim=-1)
    y_pred_pos = torch.cat([y_pred_pos, zeros], dim=-1)
    neg_loss = torch.logsumexp(y_pred_neg, dim=-1)
    pos_loss = torch.logsumexp(y_pred_pos, dim=-1)
    return (neg_loss + pos_loss).mean()


class ATLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, logits, labels):
        loss = multilabel_categorical_crossentropy(labels, logits)
        loss = loss.mean()
        return loss

    def get_label(self, logits, num_labels=-1):
        th_logit = torch.zeros_like(logits[..., :1])
        output = torch.zeros_like(logits)
        mask = logits > th_logit
        if num_labels > 0:
            top_v, _ = torch.topk(logits, num_labels, dim=1)
            top_v = top_v[:, -1]
            mask = (logits >= top_v.unsqueeze(1)) & mask
        output[mask] = 1.0
        output[:, 0] = output[:, 1:].sum(1) == 0.0
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_logsumexp_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.full([1], 0, tl.int64) tmp2 = tl.full([1], 4, tl.int64) tmp3 = tmp0 < tmp2 tmp4 = tl.load(in_ptr0 + (4 * x0 + 0), tmp3 & xmask, eviction_policy= 'evict_last', other=0.0) tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp9 = tl.load(in_ptr1 + (4 * x0 + 0), tmp3 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tmp8 * tmp9 tmp11 = 1000000000000.0 tmp12 = tmp4 * tmp11 tmp13 = tmp10 - tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp3, tmp13, tmp14) tmp16 = tmp0 >= tmp2 tl.full([1], 5, tl.int64) tmp19 = 0.0 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp16, tmp19, tmp20) tmp22 = tl.where(tmp3, tmp15, tmp21) tmp23 = tl.full([1], 1, tl.int64) tmp25 = tmp23 < tmp2 tmp26 = tl.load(in_ptr0 + (4 * x0 + 1), tmp25 & xmask, eviction_policy= 'evict_last', other=0.0) tmp27 = tmp26 * tmp5 tmp28 = tmp7 - tmp27 tmp29 = tl.load(in_ptr1 + (4 * x0 + 1), tmp25 & xmask, eviction_policy= 'evict_last', other=0.0) tmp30 = tmp28 * tmp29 tmp31 = tmp26 * tmp11 tmp32 = tmp30 - tmp31 tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp25, tmp32, tmp33) tmp35 = tmp23 >= tmp2 tmp37 = tl.where(tmp35, tmp19, tmp20) tmp38 = tl.where(tmp25, tmp34, tmp37) tmp39 = triton_helpers.maximum(tmp22, tmp38) tmp40 = tl.full([1], 2, tl.int64) tmp42 = tmp40 < tmp2 tmp43 = tl.load(in_ptr0 + (4 * x0 + 2), tmp42 & xmask, eviction_policy= 'evict_last', other=0.0) tmp44 = tmp43 * tmp5 tmp45 = tmp7 - tmp44 tmp46 = tl.load(in_ptr1 + (4 * x0 + 2), tmp42 & xmask, eviction_policy= 'evict_last', other=0.0) tmp47 = tmp45 * tmp46 tmp48 = tmp43 * tmp11 tmp49 = tmp47 - tmp48 tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp42, tmp49, tmp50) tmp52 = tmp40 >= tmp2 tmp54 = tl.where(tmp52, tmp19, tmp20) tmp55 = tl.where(tmp42, tmp51, tmp54) tmp56 = triton_helpers.maximum(tmp39, tmp55) tmp57 = tl.full([1], 3, tl.int64) tmp59 = tmp57 < tmp2 tmp60 = tl.load(in_ptr0 + (4 * x0 + 3), tmp59 & xmask, eviction_policy= 'evict_last', other=0.0) tmp61 = tmp60 * tmp5 tmp62 = tmp7 - tmp61 tmp63 = tl.load(in_ptr1 + (4 * x0 + 3), tmp59 & xmask, eviction_policy= 'evict_last', other=0.0) tmp64 = tmp62 * tmp63 tmp65 = tmp60 * tmp11 tmp66 = tmp64 - tmp65 tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype) tmp68 = tl.where(tmp59, tmp66, tmp67) tmp69 = tmp57 >= tmp2 tmp71 = tl.where(tmp69, tmp19, tmp20) tmp72 = tl.where(tmp59, tmp68, tmp71) tmp73 = triton_helpers.maximum(tmp56, tmp72) tmp75 = tmp2 < tmp2 tmp76 = tl.load(in_ptr0 + (4 * x0 + 4), tmp75 & xmask, eviction_policy= 'evict_last', other=0.0) tmp77 = tmp76 * tmp5 tmp78 = tmp7 - tmp77 tmp79 = tl.load(in_ptr1 + (4 * x0 + 4), tmp75 & xmask, eviction_policy= 'evict_last', other=0.0) tmp80 = tmp78 * tmp79 tmp81 = tmp76 * tmp11 tmp82 = tmp80 - tmp81 tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype) tmp84 = tl.where(tmp75, tmp82, tmp83) 
tmp85 = tmp2 >= tmp2 tmp87 = tl.where(tmp85, tmp19, tmp20) tmp88 = tl.where(tmp75, tmp84, tmp87) tmp89 = triton_helpers.maximum(tmp73, tmp88) tmp90 = tl_math.abs(tmp89) tmp91 = float('inf') tmp92 = tmp90 == tmp91 tmp93 = tl.where(tmp92, tmp19, tmp89) tmp94 = tmp22 - tmp93 tmp95 = tl_math.exp(tmp94) tmp96 = tmp38 - tmp93 tmp97 = tl_math.exp(tmp96) tmp98 = tmp95 + tmp97 tmp99 = tmp55 - tmp93 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp72 - tmp93 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp88 - tmp93 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp108 = tmp7 - tmp4 tmp109 = tmp108 * tmp11 tmp110 = tmp10 - tmp109 tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype) tmp112 = tl.where(tmp3, tmp110, tmp111) tmp113 = tl.where(tmp3, tmp112, tmp21) tmp114 = tmp7 - tmp26 tmp115 = tmp114 * tmp11 tmp116 = tmp30 - tmp115 tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype) tmp118 = tl.where(tmp25, tmp116, tmp117) tmp119 = tl.where(tmp25, tmp118, tmp37) tmp120 = triton_helpers.maximum(tmp113, tmp119) tmp121 = tmp7 - tmp43 tmp122 = tmp121 * tmp11 tmp123 = tmp47 - tmp122 tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype) tmp125 = tl.where(tmp42, tmp123, tmp124) tmp126 = tl.where(tmp42, tmp125, tmp54) tmp127 = triton_helpers.maximum(tmp120, tmp126) tmp128 = tmp7 - tmp60 tmp129 = tmp128 * tmp11 tmp130 = tmp64 - tmp129 tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype) tmp132 = tl.where(tmp59, tmp130, tmp131) tmp133 = tl.where(tmp59, tmp132, tmp71) tmp134 = triton_helpers.maximum(tmp127, tmp133) tmp135 = tmp7 - tmp76 tmp136 = tmp135 * tmp11 tmp137 = tmp80 - tmp136 tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype) tmp139 = tl.where(tmp75, tmp137, tmp138) tmp140 = tl.where(tmp75, tmp139, tmp87) tmp141 = triton_helpers.maximum(tmp134, tmp140) tl.store(out_ptr0 + x0, tmp89, xmask) tl.store(out_ptr1 + x0, tmp107, xmask) tl.store(out_ptr2 + x0, tmp141, xmask) @triton.jit def triton_poi_fused_cat_logsumexp_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp25 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 * tmp10 tmp12 = tmp8 - tmp5 tmp13 = 1000000000000.0 tmp14 = tmp12 * tmp13 tmp15 = tmp11 - tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp4, tmp15, tmp16) tmp18 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp21 = 0.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp18, tmp21, tmp22) tmp24 = tl.where(tmp4, tmp17, tmp23) tmp26 = tl_math.abs(tmp25) tmp27 = float('inf') tmp28 = tmp26 == tmp27 tmp29 = tl.where(tmp28, tmp21, tmp25) tmp30 = tmp24 - tmp29 tmp31 = tl_math.exp(tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_per_fused_add_logsumexp_mean_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = 
tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp9 = tl.load(in_ptr2 + 5 * r0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 5 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (2 + 5 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (3 + 5 * r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (4 + 5 * r0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr3 + r0, None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.abs(tmp2) tmp4 = float('inf') tmp5 = tmp3 == tmp4 tmp6 = 0.0 tmp7 = tl.where(tmp5, tmp6, tmp2) tmp8 = tmp1 + tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp18 = tl_math.log(tmp17) tmp20 = tl_math.abs(tmp19) tmp21 = tmp20 == tmp4 tmp22 = tl.where(tmp21, tmp6, tmp19) tmp23 = tmp18 + tmp22 tmp24 = tmp8 + tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 64.0 tmp29 = tmp27 / tmp28 tmp30 = 1.0 tmp31 = tmp29 / tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_cat_logsumexp_0[grid(64)](arg1_1, arg0_1, buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_cat_logsumexp_1[grid(320)](arg1_1, arg0_1, buf2, buf3, 320, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 triton_per_fused_add_logsumexp_mean_2[grid(1)](buf5, buf1, buf0, buf3, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 del buf3 return buf5, def multilabel_categorical_crossentropy(y_pred, y_true): y_pred = (1 - 2 * y_true) * y_pred y_pred_neg = y_pred - y_true * 1000000000000.0 y_pred_pos = y_pred - (1 - y_true) * 1000000000000.0 zeros = torch.zeros_like(y_pred[..., :1]) y_pred_neg = torch.cat([y_pred_neg, zeros], dim=-1) y_pred_pos = torch.cat([y_pred_pos, zeros], dim=-1) neg_loss = torch.logsumexp(y_pred_neg, dim=-1) pos_loss = torch.logsumexp(y_pred_pos, dim=-1) return (neg_loss + pos_loss).mean() class ATLossNew(nn.Module): def __init__(self): super().__init__() def get_label(self, logits, num_labels=-1): th_logit = torch.zeros_like(logits[..., :1]) output = torch.zeros_like(logits) mask = logits > th_logit if num_labels > 0: top_v, _ = torch.topk(logits, num_labels, dim=1) top_v = top_v[:, -1] mask = (logits >= top_v.unsqueeze(1)) & mask output[mask] = 1.0 output[:, 0] = output[:, 1:].sum(1) == 0.0 return output def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
fmc123653/DeepKE
ATLoss
false
15371
[ "MIT" ]
676
4d30e51368681c7cb73e2ecacf9b922b441cbe99
https://github.com/fmc123653/DeepKE/tree/4d30e51368681c7cb73e2ecacf9b922b441cbe99
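A tiny CPU example (added, not from the source) of the eager loss the fused kernels above implement; the values are illustrative only. Note that ATLoss.forward above feeds (labels, logits) into the helper's (y_pred, y_true) slots; this sketch calls the helper directly in its documented order.

import torch

# Hedged sketch: multilabel categorical crossentropy on a single 3-class row.
y_pred = torch.tensor([[3.0, -2.0, 0.5]])  # scores; the loss pushes positives above 0
y_true = torch.tensor([[1.0, 0.0, 1.0]])   # multi-hot labels
print(float(multilabel_categorical_crossentropy(y_pred, y_true)))  # small when positives > 0 > negatives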
GeM
import torch
import torch.nn as nn
import torch.nn.functional as F


class GeM(nn.Module):

    def __init__(self, p=3, eps=1e-06, requires_grad=False):
        super(GeM, self).__init__()
        self.p = nn.Parameter(torch.ones(1) * p, requires_grad=requires_grad)
        self.eps = eps

    def forward(self, x):
        return self.gem(x, p=self.p, eps=self.eps)

    def gem(self, x, p=3, eps=1e-06):
        return F.avg_pool2d(x.clamp(min=eps).pow(p),
            (x.size(-2), x.size(-1))).pow(1.0 / p)

    def __repr__(self):
        return (self.__class__.__name__ + '(' + 'p=' +
            '{:.4f}'.format(self.p.data.tolist()[0]) + ', ' +
            'eps=' + str(self.eps) + ')')


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp38 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp46 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp54 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp58 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp62 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp5 = libdevice.pow(tmp2, tmp4) tmp7 = triton_helpers.maximum(tmp6, tmp1) tmp8 = libdevice.pow(tmp7, tmp4) tmp9 = tmp8 + tmp5 tmp11 = triton_helpers.maximum(tmp10, tmp1) tmp12 = libdevice.pow(tmp11, tmp4) tmp13 = tmp12 + tmp9 tmp15 = triton_helpers.maximum(tmp14, tmp1) tmp16 = libdevice.pow(tmp15, tmp4) tmp17 = tmp16 + tmp13 tmp19 = triton_helpers.maximum(tmp18, tmp1) tmp20 = libdevice.pow(tmp19, tmp4) tmp21 = tmp20 + tmp17 tmp23 = triton_helpers.maximum(tmp22, tmp1) tmp24 = libdevice.pow(tmp23, tmp4) tmp25 = tmp24 + tmp21 tmp27 = triton_helpers.maximum(tmp26, tmp1) tmp28 = libdevice.pow(tmp27, tmp4) tmp29 = tmp28 + tmp25 tmp31 = triton_helpers.maximum(tmp30, tmp1) tmp32 = libdevice.pow(tmp31, tmp4) tmp33 = tmp32 + tmp29 tmp35 = triton_helpers.maximum(tmp34, tmp1) tmp36 = libdevice.pow(tmp35, tmp4) tmp37 = tmp36 + tmp33 tmp39 = triton_helpers.maximum(tmp38, tmp1) tmp40 = libdevice.pow(tmp39, tmp4) tmp41 = tmp40 + tmp37 tmp43 = triton_helpers.maximum(tmp42, tmp1) tmp44 = libdevice.pow(tmp43, tmp4) tmp45 = tmp44 + tmp41 tmp47 = triton_helpers.maximum(tmp46, tmp1) tmp48 = libdevice.pow(tmp47, tmp4) tmp49 = tmp48 + tmp45 tmp51 = triton_helpers.maximum(tmp50, tmp1) tmp52 = libdevice.pow(tmp51, tmp4) tmp53 = tmp52 + tmp49 tmp55 = triton_helpers.maximum(tmp54, tmp1) tmp56 = libdevice.pow(tmp55, tmp4) tmp57 = tmp56 + tmp53 tmp59 = 
triton_helpers.maximum(tmp58, tmp1) tmp60 = libdevice.pow(tmp59, tmp4) tmp61 = tmp60 + tmp57 tmp63 = triton_helpers.maximum(tmp62, tmp1) tmp64 = libdevice.pow(tmp63, tmp4) tmp65 = tmp64 + tmp61 tmp66 = 0.0625 tmp67 = tmp65 * tmp66 tmp68 = tl.full([1], 1, tl.int32) tmp69 = tmp68 / tmp4 tmp70 = 1.0 tmp71 = tmp69 * tmp70 tmp72 = libdevice.pow(tmp67, tmp71) tl.store(in_out_ptr0 + x0, tmp72, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (1,), (1,)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0[grid(16)](buf1, arg1_1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class GeMNew(nn.Module): def __init__(self, p=3, eps=1e-06, requires_grad=False): super(GeMNew, self).__init__() self.p = nn.Parameter(torch.ones(1) * p, requires_grad=requires_grad) self.eps = eps def gem(self, x, p=3, eps=1e-06): return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1)) ).pow(1.0 / p) def __repr__(self): return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self. p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')' def forward(self, input_0): arg0_1 = self.p arg1_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
flrngel/DOLG-pytorch
GeM
false
15372
[ "MIT" ]
56
97732d2932ef6733f17cf8ac1aee990effe6fd64
https://github.com/flrngel/DOLG-pytorch/tree/97732d2932ef6733f17cf8ac1aee990effe6fd64
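An added, hedged sanity check: generalized-mean pooling reduces to average pooling at p=1 and approaches max pooling as p grows. It uses the eager GeM class from this row on CPU.

import torch
import torch.nn.functional as F

x = torch.rand(2, 3, 8, 8)
# p=1: identical to average pooling of the clamped input
print(torch.allclose(GeM(p=1)(x), F.avg_pool2d(x.clamp(min=1e-06), (8, 8))))  # True
# large p: the gap to max pooling shrinks as p grows
print((GeM(p=64)(x) - F.max_pool2d(x, (8, 8))).abs().max())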
fusion
import torch
import torch.nn as nn
from torch.nn import Linear


class fusion(nn.Module):

    def __init__(self, feature_size=768):
        super(fusion, self).__init__()
        self.fc1 = Linear(feature_size * 3, 1)
        self.fc2 = Linear(feature_size * 3, 1)
        self.fc3 = Linear(feature_size * 3, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x1, x2, x3):
        batch_size = x1.size()[0]
        x1 = x1.view(-1, 768)
        x2 = x2.view(-1, 768)
        x3 = x3.view(-1, 768)
        x123 = torch.cat((x1, x2), 1)
        x123 = torch.cat((x123, x3), 1)
        weight1 = self.fc1(x123)
        weight2 = self.fc2(x123)
        weight3 = self.fc3(x123)
        weight1 = self.sigmoid(weight1)
        weight2 = self.sigmoid(weight2)
        weight3 = self.sigmoid(weight3)
        weight1 = weight1.view(batch_size, -1).unsqueeze(2)
        weight2 = weight1.view(batch_size, -1).unsqueeze(2)
        weight3 = weight1.view(batch_size, -1).unsqueeze(2)
        return weight1, weight2, weight3


def get_inputs():
    return [torch.rand([4, 768]), torch.rand([4, 768]), torch.rand([4, 768])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Linear

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 9216
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2304
    x1 = xindex // 2304
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1536, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 768, tl.int64)
    tmp6 = tmp0 < tmp5
    tmp7 = tmp6 & tmp4
    tmp8 = tl.load(in_ptr0 + (768 * x1 + x0), tmp7 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp9 = tmp0 >= tmp5
    tmp10 = tmp9 & tmp4
    tmp11 = tl.load(in_ptr1 + (768 * x1 + (-768 + x0)), tmp10 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp12 = tl.where(tmp6, tmp8, tmp11)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp4, tmp12, tmp13)
    tmp15 = tmp0 >= tmp3
    tl.full([1], 2304, tl.int64)
    tmp18 = tl.load(in_ptr2 + (768 * x1 + (-1536 + x0)), tmp15 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp19 = tl.where(tmp4, tmp14, tmp18)
    tl.store(out_ptr0 + x2, tmp19, xmask)


@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 768), (768, 1))
    assert_size_stride(primals_2, (4, 768), (768, 1))
    assert_size_stride(primals_3, (4, 768), (768, 1))
    assert_size_stride(primals_4, (1, 2304), (2304, 1))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (1, 2304), (2304, 1))
    assert_size_stride(primals_7, (1,), (1,))
    assert_size_stride(primals_8, (1, 2304), (2304, 1))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 2304), (2304, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(9216)](primals_1, primals_2, primals_3,
            buf0, 9216, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (2304, 1),
            (1, 2304), 0), out=buf1)
        del primals_4
        buf2 = buf1
        del buf1
        triton_poi_fused_sigmoid_1[grid(4)](buf2, primals_5, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0),
        reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0),
        reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0), buf2, buf0, buf2)


class fusionNew(nn.Module):

    def __init__(self, feature_size=768):
        super(fusionNew, self).__init__()
        self.fc1 = Linear(feature_size * 3, 1)
        self.fc2 = Linear(feature_size * 3, 1)
        self.fc3 = Linear(feature_size * 3, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_0, input_1, input_2):
        primals_4 = self.fc1.weight
        primals_5 = self.fc1.bias
        primals_6 = self.fc2.weight
        primals_7 = self.fc2.bias
        primals_8 = self.fc3.weight
        primals_9 = self.fc3.bias
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0], output[1], output[2]
funnyzhou/REFERS
fusion
false
15373
[ "MIT" ]
46
392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
https://github.com/funnyzhou/REFERS/tree/392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
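An added usage sketch (CPU). As the forward above is written, weight2 and weight3 are re-derived from weight1, so all three outputs are equal and fc2/fc3 never influence the result; the compiled call() mirrors this by returning one sigmoid buffer three times.

import torch

m = fusion()
w1, w2, w3 = m(torch.rand(4, 768), torch.rand(4, 768), torch.rand(4, 768))
print(w1.shape, torch.equal(w1, w2) and torch.equal(w1, w3))  # torch.Size([4, 1, 1]) True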
LossesOfConVIRT
import torch
import torch.nn as nn


class LossesOfConVIRT(nn.Module):
    """ """

    def __init__(self, tau=0.1, lambd=0.75):
        super(LossesOfConVIRT, self).__init__()
        self.tau = tau
        self.lambd = lambd

    def tmp_loss(self, v, u, index):
        """ """
        assert v.size(0) == u.size(0)
        item1 = torch.exp(torch.divide(torch.cosine_similarity(v[index],
            u[index], dim=0), self.tau))
        item2 = torch.exp(torch.divide(torch.cosine_similarity(
            v[index].unsqueeze(0), u, dim=1), self.tau)).sum()
        loss = -torch.log(torch.divide(item1, item2))
        return loss

    def image_text(self, v, u, index):
        """ """
        assert v.size(0) == u.size(0)
        cos = torch.nn.CosineSimilarity(dim=0)
        item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau))
        cos2 = torch.nn.CosineSimilarity(dim=1)
        item2 = torch.exp(torch.divide(cos2(v[index].unsqueeze(0), u),
            self.tau)).sum()
        loss = -torch.log(torch.divide(item1, item2))
        return loss

    def text_image(self, v, u, index):
        """ """
        assert v.size(0) == u.size(0)
        cos = torch.nn.CosineSimilarity(dim=0)
        item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau))
        cos2 = torch.nn.CosineSimilarity(dim=1)
        item2 = torch.exp(torch.divide(cos2(v, u[index].unsqueeze(0)),
            self.tau)).sum()
        loss = -torch.log(torch.divide(item1, item2)).item()
        return loss

    def forward(self, v, u):
        """
        :return:
        """
        assert v.size(0) == u.size(0)
        res = 0.0
        v = v.float()
        u = u.float()
        for i in range(v.size(0)):
            res += (self.lambd * self.image_text(v, u, i) +
                (1 - self.lambd) * self.text_image(v, u, i))
        res /= v.size(0)
        return res


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + 1) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp8 = tl.load(in_ptr0 + 2) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp12 = tl.load(in_ptr0 + 3) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp20 = tl.load(in_ptr1 + x2, xmask) tmp21 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp2 * tmp2 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = 1e-08 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp0 / tmp18 tmp22 = tmp21 * tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = libdevice.sqrt(tmp31) tmp33 = triton_helpers.maximum(tmp32, tmp17) tmp34 = tmp20 / tmp33 tmp35 = tmp19 * tmp34 tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp21 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = libdevice.sqrt(tmp4) tmp11 = 1e-08 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp0 / tmp12 tmp14 = libdevice.sqrt(tmp9) tmp15 = triton_helpers.maximum(tmp14, tmp11) tmp16 = tmp5 / tmp15 tmp17 = tmp13 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp27 = tmp25 + tmp26 tmp28 = 10.0 tmp29 = tmp27 * tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = tmp20 * tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 / tmp33 tmp37 = tl_math.log(tmp36) tmp38 = -tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(16)]( arg0_1, arg1_1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf0 = empty_strided_cuda((1,), (1,), torch.float32) buf2 = reinterpret_tensor(buf0, (), (), 0) del buf0 buf5 = buf2 del buf2 triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1[ grid(1)](buf5, arg0_1, arg1_1, buf3, 1, 4, XBLOCK=1, num_warps= 2, num_stages=1) del arg0_1 del arg1_1 del buf3 return buf5, class LossesOfConVIRTNew(nn.Module): """ """ def __init__(self, tau=0.1, lambd=0.75): super(LossesOfConVIRTNew, self).__init__() self.tau = tau self.lambd = lambd def tmp_loss(self, v, u, index): """ """ assert v.size(0) == u.size(0) item1 = torch.exp(torch.divide(torch.cosine_similarity(v[index], u[ index], dim=0), self.tau)) item2 = torch.exp(torch.divide(torch.cosine_similarity(v[index]. 
unsqueeze(0), u, dim=1), self.tau)).sum() loss = -torch.log(torch.divide(item1, item2)) return loss def image_text(self, v, u, index): """ """ assert v.size(0) == u.size(0) cos = torch.nn.CosineSimilarity(dim=0) item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau)) cos2 = torch.nn.CosineSimilarity(dim=1) item2 = torch.exp(torch.divide(cos2(v[index].unsqueeze(0), u), self .tau)).sum() loss = -torch.log(torch.divide(item1, item2)) return loss def text_image(self, v, u, index): """ """ assert v.size(0) == u.size(0) cos = torch.nn.CosineSimilarity(dim=0) item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau)) cos2 = torch.nn.CosineSimilarity(dim=1) item2 = torch.exp(torch.divide(cos2(v, u[index].unsqueeze(0)), self .tau)).sum() loss = -torch.log(torch.divide(item1, item2)).item() return loss def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
funnyzhou/REFERS
LossesOfConVIRT
false
15374
[ "MIT" ]
46
392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
https://github.com/funnyzhou/REFERS/tree/392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
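An added, hedged CPU sketch of the loss this row computes: each image_text term is an InfoNCE-style ratio, -log(exp(cos(v_i, u_i)/tau) / sum_j exp(cos(v_i, u_j)/tau)), and forward averages the lambd-weighted symmetric terms over the batch.

import torch

crit = LossesOfConVIRT(tau=0.1, lambd=0.75)
v, u = torch.rand(4, 4), torch.rand(4, 4)
print(float(crit.image_text(v, u, 0)))  # single image->text term for pair 0
print(float(crit(v, u)))                # full batch-averaged symmetric loss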
LocalResponseNormLayer
import torch
import torch.nn as nn
import torch.nn.functional as F


class LocalResponseNormLayer(nn.Module):

    def forward(self, tensor, size=5, alpha=9.999999747378752e-05,
            beta=0.75, k=1.0):
        return F.local_response_norm(tensor, size=size, alpha=alpha,
            beta=beta, k=k)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0(in_out_ptr0,
        in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x3 = xindex
    tmp48 = tl.load(in_ptr0 + x3, xmask)
    tmp0 = -2 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (-32 + x3), tmp5 & xmask, other=0.0)
    tmp7 = tmp6 * tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp5, tmp7, tmp8)
    tmp10 = -1 + x1
    tmp11 = tmp10 >= tmp1
    tmp12 = tmp10 < tmp3
    tmp13 = tmp11 & tmp12
    tmp14 = tl.load(in_ptr0 + (-16 + x3), tmp13 & xmask, other=0.0)
    tmp15 = tmp14 * tmp14
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp13, tmp15, tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tl.load(in_ptr0 + x3, tmp22 & xmask, other=0.0)
    tmp24 = tmp23 * tmp23
    tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
    tmp26 = tl.where(tmp22, tmp24, tmp25)
    tmp27 = tmp26 + tmp18
    tmp28 = 1 + x1
    tmp29 = tmp28 >= tmp1
    tmp30 = tmp28 < tmp3
    tmp31 = tmp29 & tmp30
    tmp32 = tl.load(in_ptr0 + (16 + x3), tmp31 & xmask, other=0.0)
    tmp33 = tmp32 * tmp32
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp31, tmp33, tmp34)
    tmp36 = tmp35 + tmp27
    tmp37 = 2 + x1
    tmp38 = tmp37 >= tmp1
    tmp39 = tmp37 < tmp3
    tmp40 = tmp38 & tmp39
    tmp41 = tl.load(in_ptr0 + (32 + x3), tmp40 & xmask, other=0.0)
    tmp42 = tmp41 * tmp41
    tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
    tmp44 = tl.where(tmp40, tmp42, tmp43)
    tmp45 = tmp44 + tmp36
    tmp46 = 0.2
    tmp47 = tmp45 * tmp46
    tmp49 = 9.999999747378752e-05
    tmp50 = tmp47 * tmp49
    tmp51 = 1.0
    tmp52 = tmp50 + tmp51
    tmp53 = 0.75
    tmp54 = libdevice.pow(tmp52, tmp53)
    tmp55 = tmp48 / tmp54
    tl.store(in_out_ptr0 + x3, tmp55, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1),
            torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0[grid
            (256)](buf1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf1,


class LocalResponseNormLayerNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
fuzhanrahmanian/lucent
LocalResponseNormLayer
false
15375
[ "Apache-2.0" ]
449
13b24c3c37784185275da73c7a11095b2ae809c5
https://github.com/fuzhanrahmanian/lucent/tree/13b24c3c37784185275da73c7a11095b2ae809c5
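An added manual check of the formula the fused kernel computes: squared activations are summed over a zero-padded channel window of `size`, averaged, scaled by alpha, shifted by k, raised to beta, and divided into the input. This CPU sketch should match F.local_response_norm for the defaults above.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
size, alpha, beta, k = 5, 9.999999747378752e-05, 0.75, 1.0
pad = size // 2
sq = F.pad(x * x, (0, 0, 0, 0, pad, pad))  # zero-pad the channel dimension
div = torch.stack([sq[:, c:c + size].sum(1) for c in range(x.size(1))], dim=1) / size
ref = x / (k + alpha * div) ** beta
print(torch.allclose(ref, F.local_response_norm(x, size, alpha=alpha, beta=beta, k=k), atol=1e-06))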
LinearTextualHead
import torch
import torch.nn as nn
from typing import Optional


class TextualHead(nn.Module):
    """
    Base class for all textual heads. All child classes can simply inherit
    from :class:`~torch.nn.Module`, however this is kept here for uniform
    type annotations.

    Parameters
    ----------
    visual_feature_size: int
        Size (number of channels) of the input features from the visual backbone.
    vocab_size: int
        Number of tokens in the output vocabulary.
    hidden_size: int
        Size of the token embedding vectors, or hidden state vector of the
        language model.
    """

    def __init__(self, visual_feature_size: 'int', vocab_size: 'int',
            hidden_size: 'int'):
        super().__init__()
        self.visual_feature_size = visual_feature_size
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    @property
    def textual_feature_size(self):
        """
        Size of the last dimension of output right before the output linear
        layer (which predicts a distribution over vocabulary tokens). This is
        typically same as :attr:`hidden_size` for most modules. This property
        is used to add more modules on top of this.
        """
        return self.hidden_size


class LinearTextualHead(TextualHead):
    """
    A textual head containing a single linear layer projecting from the visual
    feature size to the output vocabulary size.

    Parameters
    ----------
    visual_feature_size: int
        Size (number of channels) of the input features from the visual backbone.
    vocab_size: int
        Number of tokens in the output vocabulary.
    """

    def __init__(self, visual_feature_size: 'int', vocab_size: 'int', **kwargs):
        hidden_size = visual_feature_size
        super().__init__(visual_feature_size, vocab_size, hidden_size)
        self.output = nn.Linear(visual_feature_size, vocab_size)

    def forward(self, visual_features: 'torch.Tensor',
            caption_tokens: 'Optional[torch.Tensor]'=None,
            caption_lengths: 'Optional[torch.Tensor]'=None) -> torch.Tensor:
        """
        Project visual features directly to predict a distribution over
        vocabulary tokens through a single linear layer. This textual head
        ignores arguments ``caption_tokens`` and ``caption_lengths``, they
        are here for API consistency.

        Parameters
        ----------
        visual_features: torch.Tensor
            A tensor of shape ``(batch_size, channels, height, width)``
            containing features from visual backbone.

        Returns
        -------
        torch.Tensor
            A tensor of shape ``(batch_size, vocab_size)`` containing output
            vocabulary logits.
        """
        batch_size, channels, _height, _width = visual_features.size()
        visual_features = visual_features.view(batch_size, channels, -1)
        visual_features = visual_features.permute(0, 2, 1)
        visual_features = visual_features.mean(dim=1)
        output_logits = self.output(visual_features)
        return output_logits


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'visual_feature_size': 4, 'vocab_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
        del primals_1
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, buf1, reinterpret_tensor(primals_2,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_2
        del primals_3
    return buf2, buf1


class TextualHead(nn.Module):
    """
    Base class for all textual heads. All child classes can simply inherit
    from :class:`~torch.nn.Module`, however this is kept here for uniform
    type annotations.

    Parameters
    ----------
    visual_feature_size: int
        Size (number of channels) of the input features from the visual backbone.
    vocab_size: int
        Number of tokens in the output vocabulary.
    hidden_size: int
        Size of the token embedding vectors, or hidden state vector of the
        language model.
    """

    def __init__(self, visual_feature_size: 'int', vocab_size: 'int',
            hidden_size: 'int'):
        super().__init__()
        self.visual_feature_size = visual_feature_size
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    @property
    def textual_feature_size(self):
        """
        Size of the last dimension of output right before the output linear
        layer (which predicts a distribution over vocabulary tokens). This is
        typically same as :attr:`hidden_size` for most modules. This property
        is used to add more modules on top of this.
        """
        return self.hidden_size


class LinearTextualHeadNew(TextualHead):
    """
    A textual head containing a single linear layer projecting from the visual
    feature size to the output vocabulary size.

    Parameters
    ----------
    visual_feature_size: int
        Size (number of channels) of the input features from the visual backbone.
    vocab_size: int
        Number of tokens in the output vocabulary.
    """

    def __init__(self, visual_feature_size: 'int', vocab_size: 'int', **kwargs):
        hidden_size = visual_feature_size
        super().__init__(visual_feature_size, vocab_size, hidden_size)
        self.output = nn.Linear(visual_feature_size, vocab_size)

    def forward(self, input_0):
        primals_2 = self.output.weight
        primals_3 = self.output.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
funnyzhou/REFERS
LinearTextualHead
false
15376
[ "MIT" ]
46
392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
https://github.com/funnyzhou/REFERS/tree/392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
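An added equivalence sketch (CPU): the eager forward above is just "mean-pool the spatial positions, then apply the linear layer", so this two-liner should reproduce it exactly.

import torch

head = LinearTextualHead(visual_feature_size=4, vocab_size=4)
feats = torch.rand(4, 4, 4, 4)                      # (batch, channels, height, width)
manual = head.output(feats.flatten(2).mean(dim=2))  # (batch, vocab_size) logits
print(torch.allclose(manual, head(feats)))          # True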
MultiHeadAttention
import math
import torch
import torch.nn as nn


class ScaledDotProductAttention(nn.Module):

    def __init__(self, d_head):
        super(ScaledDotProductAttention, self).__init__()
        self.d_head = d_head
        self.attention_dropout = nn.Dropout(p=0.1)

    def forward(self, q, k, v, mask=None):
        attention_weights = torch.matmul(q, k.transpose(-2, -1))
        scaled_attention_weights = attention_weights / math.sqrt(self.d_head)
        if mask is not None:
            scaled_attention_weights = scaled_attention_weights.masked_fill(
                mask == 0, float('-inf'))
        scaled_attention_weights = nn.functional.softmax(
            scaled_attention_weights, dim=-1)
        scaled_attention_weights = self.attention_dropout(
            scaled_attention_weights)
        weighted_v = torch.matmul(scaled_attention_weights, v)
        return weighted_v


class MultiHeadAttention(nn.Module):

    def __init__(self, d_model, n_heads):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        assert d_model % n_heads == 0
        self.d_head = d_model // n_heads
        self.dot_product_attention_layer = ScaledDotProductAttention(
            self.d_head)
        self.W_0 = nn.Linear(d_model, d_model)

    def _split_into_heads(self, q, k, v):
        q = q.view(q.size(0), q.size(1), self.n_heads, self.d_head)
        k = k.view(k.size(0), k.size(1), self.n_heads, self.d_head)
        v = v.view(v.size(0), v.size(1), self.n_heads, self.d_head)
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        return q, k, v

    def _concatenate_heads(self, attention_output):
        attention_output = attention_output.transpose(1, 2).contiguous()
        attention_output = attention_output.view(attention_output.size(0),
            attention_output.size(1), -1)
        return attention_output

    def forward(self, q, k, v, mask=None):
        q, k, v = self._split_into_heads(q, k, v)
        attention_output = self.dot_product_attention_layer(q, k, v, mask)
        attention_output = self._concatenate_heads(attention_output)
        attention_output = self.W_0(attention_output)
        return attention_output


def get_inputs():
    return [torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]),
        torch.rand([4, 4, 4, 1])]


def get_init_inputs():
    return [[], {'d_model': 4, 'n_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_0[grid(16, 4)](primals_2, buf1, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 0, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf2, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](primals_3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf6 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), out=buf6) del buf4 buf7 = buf5 del buf5 triton_poi_fused_3[grid(16, 4)](buf6, buf7, 16, 4, XBLOCK=4, YBLOCK =16, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0) del buf6 extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_4 del primals_5 return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf7, (16, 4), (4, 1), 0) class ScaledDotProductAttention(nn.Module): def __init__(self, d_head): super(ScaledDotProductAttention, self).__init__() self.d_head = d_head self.attention_dropout = nn.Dropout(p=0.1) def forward(self, q, k, v, mask=None): attention_weights = torch.matmul(q, k.transpose(-2, -1)) scaled_attention_weights = attention_weights / math.sqrt(self.d_head) if mask is not None: scaled_attention_weights = scaled_attention_weights.masked_fill( mask == 0, float('-inf')) scaled_attention_weights = nn.functional.softmax( scaled_attention_weights, dim=-1) scaled_attention_weights = self.attention_dropout( scaled_attention_weights) weighted_v = torch.matmul(scaled_attention_weights, v) return weighted_v class MultiHeadAttentionNew(nn.Module): def __init__(self, d_model, n_heads): super(MultiHeadAttentionNew, self).__init__() self.n_heads = n_heads assert 
d_model % n_heads == 0 self.d_head = d_model // n_heads self.dot_product_attention_layer = ScaledDotProductAttention(self. d_head) self.W_0 = nn.Linear(d_model, d_model) def _split_into_heads(self, q, k, v): q = q.view(q.size(0), q.size(1), self.n_heads, self.d_head) k = k.view(k.size(0), k.size(1), self.n_heads, self.d_head) v = v.view(v.size(0), v.size(1), self.n_heads, self.d_head) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def _concatenate_heads(self, attention_output): attention_output = attention_output.transpose(1, 2).contiguous() attention_output = attention_output.view(attention_output.size(0), attention_output.size(1), -1) return attention_output def forward(self, input_0, input_1, input_2): primals_4 = self.W_0.weight primals_5 = self.W_0.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
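A quick smoke test for the compiled wrapper above (my sketch, not part of the record): call() pins CUDA device 0 and asserts already-head-split inputs of shape (batch=4, n_heads=4, seq_len=4, d_head=1), so the module and the tensors must live on the GPU. The d_model=4 / n_heads=4 configuration is read off the assert_size_stride guards, not taken from the source repo.

import torch

if torch.cuda.is_available():
    attn = MultiHeadAttentionNew(d_model=4, n_heads=4).cuda()
    # q, k, v in the head-split layout the traced graph expects
    q, k, v = (torch.rand(4, 4, 4, 1, device='cuda') for _ in range(3))
    out = attn(q, k, v)
    print(out.shape)  # heads re-concatenated inside call(): torch.Size([4, 4, 4])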
francismontalbo/attention-is-all-you-need-paper
MultiHeadAttention
false
15377
[ "MIT" ]
167
21ba3e48917da0c6808126d183bece6a9969cfd2
https://github.com/francismontalbo/attention-is-all-you-need-paper/tree/21ba3e48917da0c6808126d183bece6a9969cfd2
TransformerGPTEncoderLayer
import math import torch import torch.nn as nn import torch.cuda import torch.distributed def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) def generate_relative_positions_matrix(length, max_relative_positions, cache=False): """Generate the clipped relative positions matrix for a given length and maximum relative positions""" if cache: distance_mat = torch.arange(-length + 1, 1, 1).unsqueeze(0) else: range_vec = torch.arange(length) range_mat = range_vec.unsqueeze(-1).expand(-1, length).transpose(0, 1) distance_mat = range_mat - range_mat.transpose(0, 1) distance_mat_clipped = torch.clamp(distance_mat, min=- max_relative_positions, max=max_relative_positions) final_mat = distance_mat_clipped + max_relative_positions return final_mat def relative_matmul(x, z, transpose): """Helper function for relative positions attention.""" batch_size = x.shape[0] heads = x.shape[1] length = x.shape[2] x_t = x.permute(2, 0, 1, 3) x_t_r = x_t.reshape(length, heads * batch_size, -1) if transpose: z_t = z.transpose(1, 2) x_tz_matmul = torch.matmul(x_t_r, z_t) else: x_tz_matmul = torch.matmul(x_t_r, z) x_tz_matmul_r = x_tz_matmul.reshape(length, batch_size, heads, -1) x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3) return x_tz_matmul_r_t class MLP(nn.Module): def __init__(self, n_embd, n_state, dropout): super(MLP, self).__init__() self.c_fc = nn.Linear(n_embd, n_state) self.c_proj = nn.Linear(n_state, n_embd) self.act = gelu self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): self.c_fc.weight.data.normal_(std=0.02) self.c_fc.bias.data.zero_() self.c_proj.weight.data.normal_(std=0.02) self.c_proj.bias.data.zero_() def forward(self, x): """ x is input, [T, B, n_state] """ h = self.dropout_1(self.act(self.c_fc(x))) h2 = self.dropout_2(self.c_proj(h)) return h2 class MultiHeadedAttention(nn.Module): """Multi-Head Attention module from "Attention is All You Need" :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`. Similar to standard `dot` attention but uses multiple attention distributions simulataneously to select relevant items. .. mermaid:: graph BT A[key] B[value] C[query] O[output] subgraph Attn D[Attn 1] E[Attn 2] F[Attn N] end A --> D C --> D A --> E C --> E A --> F C --> F D --> O E --> O F --> O B --> O Also includes several additional tricks. Args: head_count (int): number of parallel heads model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter """ def __init__(self, head_count, model_dim, dropout=0.1, max_relative_positions=0): assert model_dim % head_count == 0 self.dim_per_head = model_dim // head_count self.model_dim = model_dim super(MultiHeadedAttention, self).__init__() self.head_count = head_count self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head) self.linear_values = nn.Linear(model_dim, head_count * self. dim_per_head) self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head ) self.softmax = nn.Softmax(dim=-1) self.dropout = nn.Dropout(dropout) self.final_linear = nn.Linear(model_dim, model_dim) self.max_relative_positions = max_relative_positions if max_relative_positions > 0: vocab_size = max_relative_positions * 2 + 1 self.relative_positions_embeddings = nn.Embedding(vocab_size, self.dim_per_head) def forward(self, key, value, query, mask=None, layer_cache=None, type=None ): """ Compute the context vector and the attention vectors. 
Args: key (FloatTensor): set of `key_len` key vectors ``(batch, key_len, dim)`` value (FloatTensor): set of `key_len` value vectors ``(batch, key_len, dim)`` query (FloatTensor): set of `query_len` query vectors ``(batch, query_len, dim)`` mask: binary mask indicating which keys have non-zero attention ``(batch, query_len, key_len)`` Returns: (FloatTensor, FloatTensor): * output context vectors ``(batch, query_len, dim)`` * one of the attention vectors ``(batch, query_len, key_len)`` """ batch_size = key.size(0) dim_per_head = self.dim_per_head head_count = self.head_count key_len = key.size(1) query_len = query.size(1) def shape(x): """Projection.""" return x.view(batch_size, -1, head_count, dim_per_head).transpose( 1, 2) def unshape(x): """Compute context.""" return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head) if layer_cache is not None: if type == 'self': query, key, value = self.linear_query(query), self.linear_keys( query), self.linear_values(query) key = shape(key) value = shape(value) if layer_cache['self_keys'] is not None: key = torch.cat((layer_cache['self_keys'], key), dim=2) if layer_cache['self_values'] is not None: value = torch.cat((layer_cache['self_values'], value), dim=2) layer_cache['self_keys'] = key layer_cache['self_values'] = value elif type == 'context': query = self.linear_query(query) if layer_cache['memory_keys'] is None: key, value = self.linear_keys(key), self.linear_values( value) key = shape(key) value = shape(value) else: key, value = layer_cache['memory_keys'], layer_cache[ 'memory_values'] layer_cache['memory_keys'] = key layer_cache['memory_values'] = value else: key = self.linear_keys(key) value = self.linear_values(value) query = self.linear_query(query) key = shape(key) value = shape(value) if self.max_relative_positions > 0 and type == 'self': key_len = key.size(2) relative_positions_matrix = generate_relative_positions_matrix( key_len, self.max_relative_positions, cache=True if layer_cache is not None else False) relations_keys = self.relative_positions_embeddings( relative_positions_matrix) relations_values = self.relative_positions_embeddings( relative_positions_matrix) query = shape(query) key_len = key.size(2) query_len = query.size(2) query = query / math.sqrt(dim_per_head) query_key = torch.matmul(query, key.transpose(2, 3)) if self.max_relative_positions > 0 and type == 'self': scores = query_key + relative_matmul(query, relations_keys, True) else: scores = query_key scores = scores.float() if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask, -1e+18) attn = self.softmax(scores) drop_attn = self.dropout(attn) context_original = torch.matmul(drop_attn, value) if self.max_relative_positions > 0 and type == 'self': context = unshape(context_original + relative_matmul(drop_attn, relations_values, False)) else: context = unshape(context_original) output = self.final_linear(context) top_attn = attn.view(batch_size, head_count, query_len, key_len)[:, 0, :, :].contiguous() return output, top_attn class TransformerGPTEncoderLayer(nn.Module): """ A single layer of the transformer encoder. Args: d_model (int): the dimension of keys/values/queries in MultiHeadedAttention, also the input size of the first-layer of the PositionwiseFeedForward. heads (int): the number of head for MultiHeadedAttention. d_ff (int): the second-layer of the PositionwiseFeedForward. dropout (float): dropout probability(0-1.0). 
""" def __init__(self, d_model, heads, d_ff, dropout, attn_dropout, max_relative_positions=0): super(TransformerGPTEncoderLayer, self).__init__() self.self_attn = MultiHeadedAttention(heads, d_model, dropout= attn_dropout, max_relative_positions=max_relative_positions) self.feed_forward = MLP(d_model, d_model * 4, dropout) self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-05) self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-05) self.dropout = nn.Dropout(dropout) def forward(self, inputs, mask): """ Args: inputs (FloatTensor): ``(batch_size, src_len, model_dim)`` mask (LongTensor): ``(batch_size, src_len, src_len)`` Returns: (FloatTensor): * outputs ``(batch_size, src_len, model_dim)`` """ dec_mask = None src_len = mask.size(-1) future_mask = torch.ones([src_len, src_len], device=mask.device, dtype=torch.uint8) future_mask = future_mask.triu_(1).view(1, src_len, src_len) dec_mask = torch.gt(mask + future_mask, 0) input_norm = self.layer_norm_1(inputs) context, _ = self.self_attn(input_norm, input_norm, input_norm, mask=dec_mask, type='self') context = self.dropout(context) + inputs context_norm = self.layer_norm_2(context) output = self.feed_forward(context_norm) output = output + context return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'heads': 4, 'd_ff': 4, 'dropout': 0.5, 'attn_dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & 
ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_add_gt_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = x0 + -1 * x1 tmp2 = tl.full([1], 1, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1], 1, tl.uint8) tmp5 = tl.full([1], 0, tl.uint8) tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp6.to(tl.float32) tmp8 = tmp0 + tmp7 tmp9 = 0.0 tmp10 = tmp8 > tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp5 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp13 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = -9.999999843067494e+17 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp6 = tl.where(tmp4, tmp2, tmp5) tmp7 = triton_helpers.maximum(tmp3, tmp6) tmp10 = tl.where(tmp8, tmp2, tmp9) tmp11 = triton_helpers.maximum(tmp7, tmp10) tmp14 = tl.where(tmp12, tmp2, tmp13) tmp15 = triton_helpers.maximum(tmp11, tmp14) tmp16 = tmp3 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp6 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp10 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x5, xmask) tmp4 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp2 = -9.999999843067494e+17 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tl.store(in_out_ptr0 + x5, tmp8, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_mul_pow_tanh_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = 
tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16,), (1,)) assert_size_stride(primals_17, (4, 16), (16, 1)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_4, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_4, buf0, buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_div_2[grid(16, 4)](buf5, primals_10, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_10 buf7 = reinterpret_tensor(buf5, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf5 triton_poi_fused_clone_3[grid(16, 4)](buf3, primals_6, buf7, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_6 buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_gt_4[grid(64)](primals_1, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf10 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf3 buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_5[grid(64)](buf9, buf8, buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 
triton_poi_fused__softmax_masked_fill_6[grid(256)](buf12, buf9, buf10, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf11 triton_poi_fused_clone_3[grid(16, 4)](buf4, primals_8, buf13, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 buf14 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf13, (16, 4, 1), (4, 1, 0), 0), out=buf14) buf15 = reinterpret_tensor(buf10, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf10 triton_poi_fused_clone_7[grid(16, 4)](buf14, buf15, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf16 = reinterpret_tensor(buf14, (16, 4), (4, 1), 0) del buf14 extern_kernels.addmm(primals_12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_12 buf17 = buf1 del buf1 buf18 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](buf16, primals_4, buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](buf16, primals_4, buf17, buf18, primals_13, primals_14, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf17 del buf18 del primals_14 buf20 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_16, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf20) del primals_16 buf21 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_add_mul_pow_tanh_10[grid(256)](buf20, buf21, 256, XBLOCK=256, num_warps=4, num_stages=1) buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf21, (16, 16), (16, 1), 0), reinterpret_tensor(primals_17, (16, 4), (1, 16), 0), out=buf22) buf23 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0) del buf22 triton_poi_fused_add_11[grid(64)](buf23, primals_18, buf16, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_18 return buf23, primals_4, primals_13, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 1, 4, 4), (16, 16, 4, 1), 0 ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf19, (16, 4), (4, 1), 0 ), buf20, reinterpret_tensor(buf21, (16, 16), (16, 1), 0 ), primals_17, primals_15, primals_11, reinterpret_tensor(buf13, ( 16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0 ), primals_9, primals_7, primals_5 def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) def generate_relative_positions_matrix(length, max_relative_positions, cache=False): """Generate the clipped relative positions matrix for a given length and maximum relative positions""" if cache: distance_mat = torch.arange(-length + 1, 1, 1).unsqueeze(0) else: range_vec = torch.arange(length) range_mat = range_vec.unsqueeze(-1).expand(-1, length).transpose(0, 1) distance_mat = range_mat - range_mat.transpose(0, 1) distance_mat_clipped = torch.clamp(distance_mat, min=- max_relative_positions, max=max_relative_positions) final_mat = distance_mat_clipped + max_relative_positions return final_mat def relative_matmul(x, z, transpose): """Helper function for relative positions attention.""" batch_size = x.shape[0] heads = 
x.shape[1] length = x.shape[2] x_t = x.permute(2, 0, 1, 3) x_t_r = x_t.reshape(length, heads * batch_size, -1) if transpose: z_t = z.transpose(1, 2) x_tz_matmul = torch.matmul(x_t_r, z_t) else: x_tz_matmul = torch.matmul(x_t_r, z) x_tz_matmul_r = x_tz_matmul.reshape(length, batch_size, heads, -1) x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3) return x_tz_matmul_r_t class MLP(nn.Module): def __init__(self, n_embd, n_state, dropout): super(MLP, self).__init__() self.c_fc = nn.Linear(n_embd, n_state) self.c_proj = nn.Linear(n_state, n_embd) self.act = gelu self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): self.c_fc.weight.data.normal_(std=0.02) self.c_fc.bias.data.zero_() self.c_proj.weight.data.normal_(std=0.02) self.c_proj.bias.data.zero_() def forward(self, x): """ x is input, [T, B, n_state] """ h = self.dropout_1(self.act(self.c_fc(x))) h2 = self.dropout_2(self.c_proj(h)) return h2 class MultiHeadedAttention(nn.Module): """Multi-Head Attention module from "Attention is All You Need" :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`. Similar to standard `dot` attention but uses multiple attention distributions simulataneously to select relevant items. .. mermaid:: graph BT A[key] B[value] C[query] O[output] subgraph Attn D[Attn 1] E[Attn 2] F[Attn N] end A --> D C --> D A --> E C --> E A --> F C --> F D --> O E --> O F --> O B --> O Also includes several additional tricks. Args: head_count (int): number of parallel heads model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter """ def __init__(self, head_count, model_dim, dropout=0.1, max_relative_positions=0): assert model_dim % head_count == 0 self.dim_per_head = model_dim // head_count self.model_dim = model_dim super(MultiHeadedAttention, self).__init__() self.head_count = head_count self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head) self.linear_values = nn.Linear(model_dim, head_count * self. dim_per_head) self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head ) self.softmax = nn.Softmax(dim=-1) self.dropout = nn.Dropout(dropout) self.final_linear = nn.Linear(model_dim, model_dim) self.max_relative_positions = max_relative_positions if max_relative_positions > 0: vocab_size = max_relative_positions * 2 + 1 self.relative_positions_embeddings = nn.Embedding(vocab_size, self.dim_per_head) def forward(self, key, value, query, mask=None, layer_cache=None, type=None ): """ Compute the context vector and the attention vectors. 
Args: key (FloatTensor): set of `key_len` key vectors ``(batch, key_len, dim)`` value (FloatTensor): set of `key_len` value vectors ``(batch, key_len, dim)`` query (FloatTensor): set of `query_len` query vectors ``(batch, query_len, dim)`` mask: binary mask indicating which keys have non-zero attention ``(batch, query_len, key_len)`` Returns: (FloatTensor, FloatTensor): * output context vectors ``(batch, query_len, dim)`` * one of the attention vectors ``(batch, query_len, key_len)`` """ batch_size = key.size(0) dim_per_head = self.dim_per_head head_count = self.head_count key_len = key.size(1) query_len = query.size(1) def shape(x): """Projection.""" return x.view(batch_size, -1, head_count, dim_per_head).transpose( 1, 2) def unshape(x): """Compute context.""" return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head) if layer_cache is not None: if type == 'self': query, key, value = self.linear_query(query), self.linear_keys( query), self.linear_values(query) key = shape(key) value = shape(value) if layer_cache['self_keys'] is not None: key = torch.cat((layer_cache['self_keys'], key), dim=2) if layer_cache['self_values'] is not None: value = torch.cat((layer_cache['self_values'], value), dim=2) layer_cache['self_keys'] = key layer_cache['self_values'] = value elif type == 'context': query = self.linear_query(query) if layer_cache['memory_keys'] is None: key, value = self.linear_keys(key), self.linear_values( value) key = shape(key) value = shape(value) else: key, value = layer_cache['memory_keys'], layer_cache[ 'memory_values'] layer_cache['memory_keys'] = key layer_cache['memory_values'] = value else: key = self.linear_keys(key) value = self.linear_values(value) query = self.linear_query(query) key = shape(key) value = shape(value) if self.max_relative_positions > 0 and type == 'self': key_len = key.size(2) relative_positions_matrix = generate_relative_positions_matrix( key_len, self.max_relative_positions, cache=True if layer_cache is not None else False) relations_keys = self.relative_positions_embeddings( relative_positions_matrix) relations_values = self.relative_positions_embeddings( relative_positions_matrix) query = shape(query) key_len = key.size(2) query_len = query.size(2) query = query / math.sqrt(dim_per_head) query_key = torch.matmul(query, key.transpose(2, 3)) if self.max_relative_positions > 0 and type == 'self': scores = query_key + relative_matmul(query, relations_keys, True) else: scores = query_key scores = scores.float() if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask, -1e+18) attn = self.softmax(scores) drop_attn = self.dropout(attn) context_original = torch.matmul(drop_attn, value) if self.max_relative_positions > 0 and type == 'self': context = unshape(context_original + relative_matmul(drop_attn, relations_values, False)) else: context = unshape(context_original) output = self.final_linear(context) top_attn = attn.view(batch_size, head_count, query_len, key_len)[:, 0, :, :].contiguous() return output, top_attn class TransformerGPTEncoderLayerNew(nn.Module): """ A single layer of the transformer encoder. Args: d_model (int): the dimension of keys/values/queries in MultiHeadedAttention, also the input size of the first-layer of the PositionwiseFeedForward. heads (int): the number of head for MultiHeadedAttention. d_ff (int): the second-layer of the PositionwiseFeedForward. dropout (float): dropout probability(0-1.0). 
""" def __init__(self, d_model, heads, d_ff, dropout, attn_dropout, max_relative_positions=0): super(TransformerGPTEncoderLayerNew, self).__init__() self.self_attn = MultiHeadedAttention(heads, d_model, dropout= attn_dropout, max_relative_positions=max_relative_positions) self.feed_forward = MLP(d_model, d_model * 4, dropout) self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-05) self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-05) self.dropout = nn.Dropout(dropout) def forward(self, input_0, input_1): primals_5 = self.self_attn.linear_keys.weight primals_2 = self.self_attn.linear_keys.bias primals_7 = self.self_attn.linear_values.weight primals_3 = self.self_attn.linear_values.bias primals_9 = self.self_attn.linear_query.weight primals_6 = self.self_attn.linear_query.bias primals_11 = self.self_attn.final_linear.weight primals_8 = self.self_attn.final_linear.bias primals_15 = self.feed_forward.c_fc.weight primals_16 = self.feed_forward.c_fc.bias primals_17 = self.feed_forward.c_proj.weight primals_10 = self.feed_forward.c_proj.bias primals_12 = self.layer_norm_1.weight primals_13 = self.layer_norm_1.bias primals_14 = self.layer_norm_2.weight primals_18 = self.layer_norm_2.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
fangleai/encoder-agnostic-adaptation
TransformerGPTEncoderLayer
false
15378
[ "MIT" ]
70
d917e654152df202dd35bba49c409c3ecd24eaf7
https://github.com/fangleai/encoder-agnostic-adaptation/tree/d917e654152df202dd35bba49c409c3ecd24eaf7
DiceLoss
import torch import torch.nn as nn class DiceLoss(nn.Module): """Sørensen–Dice coefficient loss to calculate the mean loss over a batch of data.This loss mainly calculates the similarity between two samples. To know more about this loss check this link: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient """ def __init__(self): """Simple constructor for the class.""" super(DiceLoss, self).__init__() def forward(self, predicted, target): """ Method for calculation of loss from sample. Parameters: predicted(torch.Tensor): Predicted output of the network. Shape - (Batch Size,Channel,Height,Width) target(torch.Tensor): Actual required output for the network Shape - (Batch Size,Channel,Height,Width) Returns: The mean dice Loss over the batch size. """ batch = predicted.size()[0] batch_loss = 0 for index in range(batch): coefficient = self._dice_coefficient(predicted[index], target[ index]) batch_loss += coefficient batch_loss = batch_loss / batch return 1 - batch_loss def _dice_coefficient(self, predicted, target): """Calculates the Sørensen–Dice Coefficient for a single sample. Parameters: predicted(torch.Tensor): Predicted single output of the network. Shape - (Channel,Height,Width) target(torch.Tensor): Actual required single output for the network Shape - (Channel,Height,Width) Returns: coefficient(torch.Tensor): Dice coefficient for the input sample. 1 represents high similarity and 0 represents low similarity. """ smooth = 1 product = torch.mul(predicted, target) intersection = product.sum() coefficient = (2 * intersection + smooth) / (predicted.sum() + target.sum() + smooth) return coefficient def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp12 = tl.load(in_ptr0 + (64 + r0), None) tmp13 = tl.load(in_ptr1 + (64 + r0), None) tmp24 = tl.load(in_ptr0 + (128 + r0), None) tmp25 = tl.load(in_ptr1 + (128 + r0), None) tmp36 = tl.load(in_ptr0 + (192 + r0), None) tmp37 = tl.load(in_ptr1 + (192 + r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp23 = tl.sum(tmp21, 1)[:, None] tmp26 = tmp24 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp32 = tl.sum(tmp30, 1)[:, None] tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp38 = tmp36 * tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp48 = 2.0 tmp49 = tmp5 * tmp48 tmp50 = 1.0 tmp51 = tmp49 + tmp50 tmp52 = tmp8 + tmp11 tmp53 = tmp52 + tmp50 tmp54 = tmp51 / tmp53 tmp55 = 0.0 tmp56 = tmp54 + tmp55 tmp57 = tmp17 * tmp48 tmp58 = tmp57 + tmp50 tmp59 = tmp20 + tmp23 tmp60 = tmp59 + tmp50 tmp61 = tmp58 / tmp60 tmp62 = tmp56 + tmp61 tmp63 = tmp29 * tmp48 tmp64 = tmp63 + tmp50 tmp65 = tmp32 + tmp35 tmp66 = tmp65 + tmp50 tmp67 = tmp64 / tmp66 tmp68 = tmp62 + tmp67 tmp69 = tmp41 * tmp48 tmp70 = tmp69 + tmp50 tmp71 = tmp44 + tmp47 tmp72 = tmp71 + tmp50 tmp73 = tmp70 / tmp72 tmp74 = tmp68 + tmp73 tmp75 = 0.25 tmp76 = tmp74 * tmp75 tmp77 = tmp50 - tmp76 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp77, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf12 = buf0 del buf0 buf13 = buf12 del buf12 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf13, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf13, class DiceLossNew(nn.Module): """Sørensen–Dice coefficient loss to calculate the mean loss over a batch of data.This loss mainly calculates the similarity between two samples. 
To know more about this loss check this link: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient """ def __init__(self): """Simple constructor for the class.""" super(DiceLossNew, self).__init__() def _dice_coefficient(self, predicted, target): """Calculates the Sørensen–Dice Coefficient for a single sample. Parameters: predicted(torch.Tensor): Predicted single output of the network. Shape - (Channel,Height,Width) target(torch.Tensor): Actual required single output for the network Shape - (Channel,Height,Width) Returns: coefficient(torch.Tensor): Dice coefficient for the input sample. 1 represents high similarity and 0 represents low similarity. """ smooth = 1 product = torch.mul(predicted, target) intersection = product.sum() coefficient = (2 * intersection + smooth) / (predicted.sum() + target.sum() + smooth) return coefficient def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
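DiceLossNew collapses the per-sample Python loop into one fused reduction kernel, so equivalence with the eager loss can be checked directly. A minimal sketch, assuming a CUDA device since call() launches Triton kernels:

import torch

if torch.cuda.is_available():
    pred, target = (t.cuda() for t in get_inputs())
    ref = DiceLoss()(pred, target)      # eager: Python loop over the batch
    opt = DiceLossNew()(pred, target)   # fused: one Triton reduction
    torch.testing.assert_close(opt, ref)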
g-freire/Brain-Tumor-Segmentation
DiceLoss
false
15379
[ "MIT" ]
156
e4f258feb64c11815570e295c58bda78afd21ab9
https://github.com/g-freire/Brain-Tumor-Segmentation/tree/e4f258feb64c11815570e295c58bda78afd21ab9
MaxPool2dLayer
import torch import torch.nn as nn import torch.nn.functional as F class MaxPool2dLayer(nn.Module): def forward(self, tensor, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False): return F.max_pool2d(tensor, kernel_size, stride=stride, padding= padding, ceil_mode=ceil_mode) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), xmask) tmp7 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), xmask) tmp9 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), xmask) tmp11 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), xmask) tmp13 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), xmask) tmp15 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxPool2dLayerNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
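Note that the compiled graph bakes in the eager module's default hyper-parameters (3x3 window, stride 1, no padding, ceil_mode=False), so MaxPool2dLayerNew no longer accepts overrides. A hedged CUDA sketch comparing it against the eager default path:

import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    x = get_inputs()[0].cuda()
    ref = F.max_pool2d(x, (3, 3), stride=(1, 1))  # eager defaults
    opt = MaxPool2dLayerNew()(x)                  # fused 3x3 / stride-1 kernel
    torch.testing.assert_close(opt, ref)          # both (4, 4, 2, 2)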
fuzhanrahmanian/lucent
MaxPool2dLayer
false
15380
[ "Apache-2.0" ]
449
13b24c3c37784185275da73c7a11095b2ae809c5
https://github.com/fuzhanrahmanian/lucent/tree/13b24c3c37784185275da73c7a11095b2ae809c5
CosineBasisLinear
import torch import numpy as np from torch import nn def cosine_basis_functions(x, n_basis_functions=64): """Cosine basis functions used to embed quantile thresholds. Args: x (torch.Tensor): Input. n_basis_functions (int): Number of cosine basis functions. Returns: ndarray: Embedding with shape of (x.shape + (n_basis_functions,)). """ i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float, device =x.device) * np.pi embedding = torch.cos(x[..., None] * i_pi) assert embedding.shape == x.shape + (n_basis_functions,) return embedding class CosineBasisLinear(nn.Module): """Linear layer following cosine basis functions. Args: n_basis_functions (int): Number of cosine basis functions. out_size (int): Output size. """ def __init__(self, n_basis_functions, out_size): super().__init__() self.linear = nn.Linear(n_basis_functions, out_size) self.n_basis_functions = n_basis_functions self.out_size = out_size def forward(self, x): """Evaluate. Args: x (torch.Tensor): Input. Returns: torch.Tensor: Output with shape of (x.shape + (out_size,)). """ h = cosine_basis_functions(x, self.n_basis_functions) h = h.reshape(-1, self.n_basis_functions) out = self.linear(h) out = out.reshape(*x.shape, self.out_size) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_basis_functions': 4, 'out_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_arange_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = 1 + x0 tmp2 = tmp1.to(tl.float32) tmp3 = 3.141592653589793 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl_math.cos(tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_arange_cos_mul_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (256, 4), (4, 1), 0) def cosine_basis_functions(x, n_basis_functions=64): """Cosine basis functions used to embed quantile thresholds. Args: x (torch.Tensor): Input. n_basis_functions (int): Number of cosine basis functions. Returns: ndarray: Embedding with shape of (x.shape + (n_basis_functions,)). """ i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float, device =x.device) * np.pi embedding = torch.cos(x[..., None] * i_pi) assert embedding.shape == x.shape + (n_basis_functions,) return embedding class CosineBasisLinearNew(nn.Module): """Linear layer following cosine basis functions. Args: n_basis_functions (int): Number of cosine basis functions. out_size (int): Output size. """ def __init__(self, n_basis_functions, out_size): super().__init__() self.linear = nn.Linear(n_basis_functions, out_size) self.n_basis_functions = n_basis_functions self.out_size = out_size def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
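Here the cos(i * pi * x) embedding is fused into a single pointwise kernel, with the n_basis_functions=4 setting from get_init_inputs() baked into the launch constants. A parity sketch under the same CUDA assumption as the checks above:

import torch

if torch.cuda.is_available():
    args, kwargs = get_init_inputs()
    ref = CosineBasisLinear(*args, **kwargs).cuda()
    opt = CosineBasisLinearNew(*args, **kwargs).cuda()
    opt.load_state_dict(ref.state_dict())
    x = get_inputs()[0].cuda()
    torch.testing.assert_close(opt(x), ref(x))  # both (4, 4, 4, 4, 4)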
g-votte/pfrl
CosineBasisLinear
false
15381
[ "MIT" ]
824
4c30c1d73f0941a2b649b62937eec346bb55a95e
https://github.com/g-votte/pfrl/tree/4c30c1d73f0941a2b649b62937eec346bb55a95e
FCLateActionSAQFunction
import torch import numpy as np from torch import nn from abc import ABCMeta from abc import abstractmethod import torch.nn.functional as F def init_lecun_normal(tensor, scale=1.0): """Initializes the tensor with LeCunNormal.""" fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in') std = scale * np.sqrt(1.0 / fan_in) with torch.no_grad(): return tensor.normal_(0, std) @torch.no_grad() def init_chainer_default(layer): """Initializes the layer with the chainer default. weights with LeCunNormal(scale=1.0) and zeros as biases """ assert isinstance(layer, nn.Module) if isinstance(layer, (nn.Linear, nn.Conv2d)): init_lecun_normal(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) return layer class MLP(nn.Module): """Multi-Layer Perceptron""" def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu, last_wscale=1): self.in_size = in_size self.out_size = out_size self.hidden_sizes = hidden_sizes self.nonlinearity = nonlinearity super().__init__() if hidden_sizes: self.hidden_layers = nn.ModuleList() self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0])) for hin, hout in zip(hidden_sizes, hidden_sizes[1:]): self.hidden_layers.append(nn.Linear(hin, hout)) self.hidden_layers.apply(init_chainer_default) self.output = nn.Linear(hidden_sizes[-1], out_size) else: self.output = nn.Linear(in_size, out_size) init_lecun_normal(self.output.weight, scale=last_wscale) nn.init.zeros_(self.output.bias) def forward(self, x): h = x if self.hidden_sizes: for l in self.hidden_layers: h = self.nonlinearity(l(h)) return self.output(h) class StateActionQFunction(object, metaclass=ABCMeta): """Abstract Q-function with state and action input.""" @abstractmethod def __call__(self, x, a): """Evaluates Q-function Args: x (ndarray): state input a (ndarray): action input Returns: Q-value for state x and action a """ raise NotImplementedError() class FCLateActionSAQFunction(nn.Module, StateActionQFunction): """Fully-connected (s,a)-input Q-function with late action input. Actions are not included until the second hidden layer and not normalized. This architecture is used in the DDPG paper: http://arxiv.org/abs/1509.02971 Args: n_dim_obs (int): Number of dimensions of observation space. n_dim_action (int): Number of dimensions of action space. n_hidden_channels (int): Number of hidden channels. n_hidden_layers (int): Number of hidden layers. It must be greater than or equal to 1. nonlinearity (callable): Nonlinearity between layers. It must accept a Variable as an argument and return a Variable with the same shape. Nonlinearities with learnable parameters such as PReLU are not supported. last_wscale (float): Scale of weight initialization of the last layer. 
""" def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels, n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0): assert n_hidden_layers >= 1 self.n_input_channels = n_dim_obs + n_dim_action self.n_hidden_layers = n_hidden_layers self.n_hidden_channels = n_hidden_channels self.nonlinearity = nonlinearity super().__init__() self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels, hidden_sizes=[]) self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1, hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers - 1), nonlinearity=nonlinearity, last_wscale=last_wscale) self.output = self.mlp.output def forward(self, state, action): h = self.nonlinearity(self.obs_mlp(state)) h = torch.cat((h, action), dim=1) return self.mlp(h) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_dim_obs': 4, 'n_dim_action': 4, 'n_hidden_channels': 4, 'n_hidden_layers': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np from torch import nn from abc import ABCMeta from abc import abstractmethod import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 8), (8, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](buf0, primals_3, primals_4, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf0, primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_3 return buf3, primals_1, buf1, primals_5, buf4 def init_lecun_normal(tensor, scale=1.0): """Initializes the tensor with LeCunNormal.""" fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in') std = scale * np.sqrt(1.0 / fan_in) with torch.no_grad(): return tensor.normal_(0, std) @torch.no_grad() def 
init_chainer_default(layer): """Initializes the layer with the chainer default. weights with LeCunNormal(scale=1.0) and zeros as biases """ assert isinstance(layer, nn.Module) if isinstance(layer, (nn.Linear, nn.Conv2d)): init_lecun_normal(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) return layer class MLP(nn.Module): """Multi-Layer Perceptron""" def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu, last_wscale=1): self.in_size = in_size self.out_size = out_size self.hidden_sizes = hidden_sizes self.nonlinearity = nonlinearity super().__init__() if hidden_sizes: self.hidden_layers = nn.ModuleList() self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0])) for hin, hout in zip(hidden_sizes, hidden_sizes[1:]): self.hidden_layers.append(nn.Linear(hin, hout)) self.hidden_layers.apply(init_chainer_default) self.output = nn.Linear(hidden_sizes[-1], out_size) else: self.output = nn.Linear(in_size, out_size) init_lecun_normal(self.output.weight, scale=last_wscale) nn.init.zeros_(self.output.bias) def forward(self, x): h = x if self.hidden_sizes: for l in self.hidden_layers: h = self.nonlinearity(l(h)) return self.output(h) class StateActionQFunction(object, metaclass=ABCMeta): """Abstract Q-function with state and action input.""" @abstractmethod def __call__(self, x, a): """Evaluates Q-function Args: x (ndarray): state input a (ndarray): action input Returns: Q-value for state x and action a """ raise NotImplementedError() class FCLateActionSAQFunctionNew(nn.Module, StateActionQFunction): """Fully-connected (s,a)-input Q-function with late action input. Actions are not included until the second hidden layer and not normalized. This architecture is used in the DDPG paper: http://arxiv.org/abs/1509.02971 Args: n_dim_obs (int): Number of dimensions of observation space. n_dim_action (int): Number of dimensions of action space. n_hidden_channels (int): Number of hidden channels. n_hidden_layers (int): Number of hidden layers. It must be greater than or equal to 1. nonlinearity (callable): Nonlinearity between layers. It must accept a Variable as an argument and return a Variable with the same shape. Nonlinearities with learnable parameters such as PReLU are not supported. last_wscale (float): Scale of weight initialization of the last layer. """ def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels, n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0): assert n_hidden_layers >= 1 self.n_input_channels = n_dim_obs + n_dim_action self.n_hidden_layers = n_hidden_layers self.n_hidden_channels = n_hidden_channels self.nonlinearity = nonlinearity super().__init__() self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels, hidden_sizes=[]) self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1, hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers - 1), nonlinearity=nonlinearity, last_wscale=last_wscale) self.output = self.mlp.output def forward(self, input_0, input_1): primals_1 = input_0 primals_2 = self.obs_mlp.output.weight primals_3 = self.obs_mlp.output.bias primals_4 = input_1 primals_5 = self.mlp.output.weight primals_6 = self.mlp.output.bias output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
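With the state bound to primals_1 and the action to primals_4, as call() consumes them, a hedged equivalence check (my sketch, CUDA assumed; the Q-function is deterministic, so no eval-mode bookkeeping is needed):

import torch

if torch.cuda.is_available():
    args, kwargs = get_init_inputs()
    ref = FCLateActionSAQFunction(*args, **kwargs).cuda()
    opt = FCLateActionSAQFunctionNew(*args, **kwargs).cuda()
    opt.load_state_dict(ref.state_dict())
    state, action = (t.cuda() for t in get_inputs())
    torch.testing.assert_close(opt(state, action), ref(state, action))  # (4, 1)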
g-votte/pfrl
FCLateActionSAQFunction
false
15,382
[ "MIT" ]
824
4c30c1d73f0941a2b649b62937eec346bb55a95e
https://github.com/g-votte/pfrl/tree/4c30c1d73f0941a2b649b62937eec346bb55a95e
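For reference, a minimal smoke test for the compiled Q-function above; this is a sketch, not part of the original record. The input shapes are fixed by the assert_size_stride guards in call() (4-dim observations and actions, batch size 4, one hidden layer of 4 channels), and the generated kernels are CUDA-only, so it assumes a CUDA device with a matching torch/triton build; all tensor values are illustrative.

import torch

# Hypothetical smoke test; shapes must match the guards in call().
if torch.cuda.is_available():
    qf = FCLateActionSAQFunctionNew(n_dim_obs=4, n_dim_action=4,
        n_hidden_channels=4, n_hidden_layers=1).cuda()
    obs = torch.rand(4, 4, device='cuda')  # batch of 4 observations
    act = torch.rand(4, 4, device='cuda')  # batch of 4 actions
    q = qf(obs, act)  # obs -> hidden, then actions concatenated late
    print(q.shape)    # torch.Size([4, 1])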
BertAttention
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the
        square root).
        """
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class BertSelfAttention(nn.Module):

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                 % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.
            num_attention_heads)
        self.all_head_size = (self.num_attention_heads * self.
            attention_head_size)
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(
            -1, -2))
        attention_scores = attention_scores / math.sqrt(self.
            attention_head_size)
        attention_scores = torch.clamp(attention_scores, -10000.0, 10000.0)
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.
            all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


class BertSelfOutput(nn.Module):

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        self_output = self.self(input_tensor, attention_mask)
        attention_output = self.output(self_output, input_tensor)
        return attention_output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4,
        num_attention_heads=4, attention_probs_dropout_prob=0.5,
        hidden_dropout_prob=0.5)}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)


@triton.jit
def triton_poi_fused__softmax_add_clamp_div_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy=
        'evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy=
        'evict_last')
    tmp20 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy=
        'evict_last')
    tmp27 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy=
        'evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = -10000.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 10000.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp9 * tmp1
    tmp11 = triton_helpers.maximum(tmp10, tmp3)
    tmp12 = triton_helpers.minimum(tmp11, tmp5)
    tmp14 = tmp12 + tmp13
    tmp15 = triton_helpers.maximum(tmp8, tmp14)
    tmp17 = tmp16 * tmp1
    tmp18 = triton_helpers.maximum(tmp17, tmp3)
    tmp19 = triton_helpers.minimum(tmp18, tmp5)
    tmp21 = tmp19 + tmp20
    tmp22 = triton_helpers.maximum(tmp15, tmp21)
    tmp24 = tmp23 * tmp1
    tmp25 = triton_helpers.maximum(tmp24, tmp3)
    tmp26 = triton_helpers.minimum(tmp25, tmp5)
    tmp28 = tmp26 + tmp27
    tmp29 = triton_helpers.maximum(tmp22, tmp28)
    tmp30 = tmp8 - tmp29
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp14 - tmp29
    tmp33 = tl_math.exp(tmp32)
    tmp34 = tmp31 + tmp33
    tmp35 = tmp21 - tmp29
    tmp36 = tl_math.exp(tmp35)
    tmp37 = tmp34 + tmp36
    tmp38 = tmp28 - tmp29
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tmp37 + tmp39
    tl.store(out_ptr0 + x2, tmp29, xmask)
    tl.store(out_ptr1 + x2, tmp40, xmask)


@triton.jit
def triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex % 64
    x5 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp7 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = -10000.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 10000.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 - tmp9
    tmp11 = tl_math.exp(tmp10)
    tmp13 = tmp11 / tmp12
    tmp14 = tmp2 >= tmp3
    tmp15 = tmp2 <= tmp5
    tmp16 = tmp14 & tmp15
    tl.store(out_ptr0 + x3, tmp13, xmask)
    tl.store(out_ptr1 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_mean_pow_sub_4(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x2, xmask)
    tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 - tmp4
    tmp7 = 1e-12
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tmp10 = tmp5 / tmp9
    tmp11 = tmp0 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
        del primals_6
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
        buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf0
        triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
        buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf1
        buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_add_clamp_div_1[grid(64)](buf5,
            primals_8, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2[grid(256)](
            buf5, primals_8, buf6, buf7, buf8, buf16, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf5
        del primals_8
        buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf7
        triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_7
        buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
        buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf6
        triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
        del buf10
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf12)
        del primals_10
        buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_mean_pow_sub_4[grid(16)](buf12, primals_3,
            buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_sqrt_sub_5[grid(64)](primals_11,
            buf12, primals_3, buf13, buf14, primals_12, buf15, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        del buf13
        del buf14
        del primals_12
    return buf15, primals_3, primals_11, buf8, reinterpret_tensor(buf11, (
        16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16,
        1, 4), (4, 1, 1), 0), buf16, reinterpret_tensor(buf3, (16, 1, 4), (
        4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the
        square root).
        """
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class BertSelfAttention(nn.Module):

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                 % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.
            num_attention_heads)
        self.all_head_size = (self.num_attention_heads * self.
            attention_head_size)
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(
            -1, -2))
        attention_scores = attention_scores / math.sqrt(self.
            attention_head_size)
        attention_scores = torch.clamp(attention_scores, -10000.0, 10000.0)
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.
            all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


class BertSelfOutput(nn.Module):

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttentionNew(nn.Module):

    def __init__(self, config):
        super(BertAttentionNew, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_0, input_1):
        primals_1 = self.self.query.weight
        primals_2 = self.self.query.bias
        primals_4 = self.self.key.weight
        primals_5 = self.self.key.bias
        primals_6 = self.self.value.weight
        primals_7 = self.self.value.bias
        primals_9 = self.output.dense.weight
        primals_10 = self.output.dense.bias
        primals_11 = self.output.LayerNorm.weight
        primals_12 = self.output.LayerNorm.bias
        primals_3 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12])
        return output[0]
BIT-ENGD/eeqa
BertAttention
false
15,383
[ "MIT" ]
142
2995abbaff1fb47131246a247ee7ed62aa94f4c3
https://github.com/BIT-ENGD/eeqa/tree/2995abbaff1fb47131246a247ee7ed62aa94f4c3
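A small usage sketch for the eager BertAttention module above; it is not part of the original record. types.SimpleNamespace stands in for the _mock_config helper (an assumption, since that helper lives in _paritybench_helpers), and the shapes mirror get_inputs(). Runs on CPU.

import types
import torch

# SimpleNamespace as a stand-in for _mock_config (assumption).
config = types.SimpleNamespace(hidden_size=4, num_attention_heads=4,
    attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)
attn = BertAttention(config)
attn.eval()  # disable dropout for a deterministic check
hidden = torch.rand(4, 4, 4)   # (batch, seq_len, hidden_size)
mask = torch.zeros(4, 4, 4)    # additive mask; zeros keep all positions
out = attn(hidden, mask)       # scores are clamped to [-10000, 10000]
print(out.shape)               # torch.Size([4, 4, 4])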
FocalLoss
import torch
from torch import nn


def log_minus_sigmoid(x):
    return torch.clamp(-x, max=0) - torch.log(1 + torch.exp(-torch.abs(x))
        ) + 0.5 * torch.clamp(x, min=0, max=0)


def log_sigmoid(x):
    return torch.clamp(x, max=0) - torch.log(1 + torch.exp(-torch.abs(x))
        ) + 0.5 * torch.clamp(x, min=0, max=0)


class FocalLoss(nn.Module):

    def __init__(self, gamma=2):
        super(FocalLoss, self).__init__()
        self.gamma = gamma

    def forward(self, input, target):
        pos_log_sig = log_sigmoid(input)
        neg_log_sig = log_minus_sigmoid(input)
        prob = torch.sigmoid(input)
        pos_weight = torch.pow(1 - prob, self.gamma)
        neg_weight = torch.pow(prob, self.gamma)
        loss = -(target * pos_weight * pos_log_sig + (1 - target) *
            neg_weight * neg_log_sig)
        avg_weight = target * pos_weight + (1 - target) * neg_weight
        loss /= avg_weight.mean()
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = 1.0
    tmp4 = tmp3 - tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp0 * tmp5
    tmp7 = 0.0
    tmp8 = triton_helpers.minimum(tmp1, tmp7)
    tmp9 = tl_math.abs(tmp1)
    tmp10 = -tmp9
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp11 + tmp3
    tmp13 = tl_math.log(tmp12)
    tmp14 = tmp8 - tmp13
    tmp15 = triton_helpers.maximum(tmp1, tmp7)
    tmp16 = triton_helpers.minimum(tmp15, tmp7)
    tmp17 = 0.5
    tmp18 = tmp16 * tmp17
    tmp19 = tmp14 + tmp18
    tmp20 = tmp6 * tmp19
    tmp21 = tmp3 - tmp0
    tmp22 = tmp2 * tmp2
    tmp23 = tmp21 * tmp22
    tmp24 = -tmp1
    tmp25 = triton_helpers.minimum(tmp24, tmp7)
    tmp26 = tmp25 - tmp13
    tmp27 = tmp26 + tmp18
    tmp28 = tmp23 * tmp27
    tmp29 = tmp20 + tmp28
    tmp30 = -tmp29
    tmp31 = tmp6 + tmp23
    tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
    tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0))
    tmp35 = 256.0
    tmp36 = tmp34 / tmp35
    tmp37 = tmp30 / tmp36
    tmp38 = tl.broadcast_to(tmp37, [RBLOCK])
    tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
    tmp41 = tmp40 / tmp35
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp41, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        buf3 = buf2
        del buf2
        get_raw_stream(0)
        triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0[
            grid(1)](buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf3,


def log_minus_sigmoid(x):
    return torch.clamp(-x, max=0) - torch.log(1 + torch.exp(-torch.abs(x))
        ) + 0.5 * torch.clamp(x, min=0, max=0)


def log_sigmoid(x):
    return torch.clamp(x, max=0) - torch.log(1 + torch.exp(-torch.abs(x))
        ) + 0.5 * torch.clamp(x, min=0, max=0)


class FocalLossNew(nn.Module):

    def __init__(self, gamma=2):
        super(FocalLossNew, self).__init__()
        self.gamma = gamma

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
gabrielsluz/vince
FocalLoss
false
15,384
[ "Apache-2.0" ]
61
f4e17a2cf70c080a7e01e46d15537e33224c869b
https://github.com/gabrielsluz/vince/tree/f4e17a2cf70c080a7e01e46d15537e33224c869b
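A quick numerical sanity check for the eager FocalLoss above; this sketch is not part of the original record. The two log helpers are numerically stable forms of log(sigmoid(x)) and log(1 - sigmoid(x)), so with gamma=0 the focal weights collapse to 1, avg_weight.mean() equals 1, and the loss reduces to plain binary cross-entropy on the logits. Values are illustrative; runs on CPU.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(4, 4, 4, 4)   # raw scores, not probabilities
target = torch.rand(4, 4, 4, 4)    # soft labels in [0, 1]

loss = FocalLoss(gamma=2)(logits, target)
print(float(loss))

# With gamma=0 the loss should match BCE-with-logits up to float error.
ref = F.binary_cross_entropy_with_logits(logits, target)
assert torch.isclose(FocalLoss(gamma=0)(logits, target), ref, atol=1e-5)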
PPO
import random
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F

# gamma, lambda_gae, epoch_k, batch_size, epsilon_clip, critic_coefficient
# and entropy_coefficient are expected to be module-level hyperparameters
# defined elsewhere in the training script.


class BatchMaker:

    def __init__(self, states, actions, returns, advantages, old_policies):
        self.states = states
        self.actions = actions
        self.returns = returns
        self.advantages = advantages
        self.old_policies = old_policies

    def sample(self):
        sample_indexes = random.sample(range(len(self.states)), batch_size)
        states_sample = self.states[sample_indexes]
        actions_sample = self.actions[sample_indexes]
        returns_sample = self.returns[sample_indexes]
        advantages_sample = self.advantages[sample_indexes]
        old_policies_sample = self.old_policies[sample_indexes]
        return (states_sample, actions_sample, returns_sample,
            advantages_sample, old_policies_sample)


class PPO(nn.Module):

    def __init__(self, num_inputs, num_outputs):
        super(PPO, self).__init__()
        self.t = 0
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.fc = nn.Linear(num_inputs, 128)
        self.fc_actor = nn.Linear(128, num_outputs)
        self.fc_critic = nn.Linear(128, 1)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)

    def forward(self, input):
        x = torch.relu(self.fc(input))
        policy = F.softmax(self.fc_actor(x), dim=-1)
        value = self.fc_critic(x)
        return policy, value

    @classmethod
    def get_gae(cls, values, rewards, masks):
        returns = torch.zeros_like(rewards)
        advantages = torch.zeros_like(rewards)
        running_return = 0
        previous_value = 0
        running_advantage = 0
        for t in reversed(range(len(rewards))):
            running_return = rewards[t] + gamma * running_return * masks[t]
            running_tderror = rewards[t] + gamma * previous_value * masks[t
                ] - values.data[t]
            running_advantage = (running_tderror + gamma * lambda_gae *
                running_advantage * masks[t])
            returns[t] = running_return
            previous_value = values.data[t]
            advantages[t] = running_advantage
        return returns, advantages

    @classmethod
    def train_model(cls, net, transitions, optimizer):
        states, actions, rewards, masks = (transitions.state, transitions.
            action, transitions.reward, transitions.mask)
        states = torch.stack(states)
        actions = torch.stack(actions)
        rewards = torch.Tensor(rewards)
        masks = torch.Tensor(masks)
        old_policies, old_values = net(states)
        old_policies = old_policies.view(-1, net.num_outputs).detach()
        returns, advantages = net.get_gae(old_values.view(-1).detach(),
            rewards, masks)
        batch_maker = BatchMaker(states, actions, returns, advantages,
            old_policies)
        for _ in range(epoch_k):
            for _ in range(len(states) // batch_size):
                (states_sample, actions_sample, returns_sample,
                    advantages_sample, old_policies_sample
                    ) = batch_maker.sample()
                policies, values = net(states_sample)
                values = values.view(-1)
                policies = policies.view(-1, net.num_outputs)
                ratios = (policies / old_policies_sample * actions_sample.
                    detach()).sum(dim=1)
                clipped_ratios = torch.clamp(ratios, min=1.0 -
                    epsilon_clip, max=1.0 + epsilon_clip)
                actor_loss = -torch.min(ratios * advantages_sample,
                    clipped_ratios * advantages_sample).sum()
                critic_loss = (returns_sample.detach() - values).pow(2).sum()
                policy_entropy = (torch.log(policies) * policies).sum(1,
                    keepdim=True).mean()
                loss = (actor_loss + critic_coefficient * critic_loss -
                    entropy_coefficient * policy_entropy)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        return loss

    def get_action(self, input):
        policy, _ = self.forward(input)
        policy = policy[0].data.numpy()
        action = np.random.choice(self.num_outputs, 1, p=policy)[0]
        return action


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_inputs': 4, 'num_outputs': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import random
import numpy as np
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

# gamma, lambda_gae, epoch_k, batch_size, epsilon_clip, critic_coefficient
# and entropy_coefficient are expected to be module-level hyperparameters
# defined elsewhere in the training script.


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (128, 4), (4, 1))
    assert_size_stride(primals_2, (128,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 128), (128, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (1, 128), (128, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
            primals_2, buf7, 8192, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
            (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
            0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf3
        buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128),
            (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128),
            0), alpha=1, beta=1, out=buf6)
        del primals_7
    return (buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_6,
        primals_4, buf7)


class BatchMaker:

    def __init__(self, states, actions, returns, advantages, old_policies):
        self.states = states
        self.actions = actions
        self.returns = returns
        self.advantages = advantages
        self.old_policies = old_policies

    def sample(self):
        sample_indexes = random.sample(range(len(self.states)), batch_size)
        states_sample = self.states[sample_indexes]
        actions_sample = self.actions[sample_indexes]
        returns_sample = self.returns[sample_indexes]
        advantages_sample = self.advantages[sample_indexes]
        old_policies_sample = self.old_policies[sample_indexes]
        return (states_sample, actions_sample, returns_sample,
            advantages_sample, old_policies_sample)


class PPONew(nn.Module):

    def __init__(self, num_inputs, num_outputs):
        super(PPONew, self).__init__()
        self.t = 0
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.fc = nn.Linear(num_inputs, 128)
        self.fc_actor = nn.Linear(128, num_outputs)
        self.fc_critic = nn.Linear(128, 1)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)

    @classmethod
    def get_gae(cls, values, rewards, masks):
        returns = torch.zeros_like(rewards)
        advantages = torch.zeros_like(rewards)
        running_return = 0
        previous_value = 0
        running_advantage = 0
        for t in reversed(range(len(rewards))):
            running_return = rewards[t] + gamma * running_return * masks[t]
            running_tderror = rewards[t] + gamma * previous_value * masks[t
                ] - values.data[t]
            running_advantage = (running_tderror + gamma * lambda_gae *
                running_advantage * masks[t])
            returns[t] = running_return
            previous_value = values.data[t]
            advantages[t] = running_advantage
        return returns, advantages

    @classmethod
    def train_model(cls, net, transitions, optimizer):
        states, actions, rewards, masks = (transitions.state, transitions.
            action, transitions.reward, transitions.mask)
        states = torch.stack(states)
        actions = torch.stack(actions)
        rewards = torch.Tensor(rewards)
        masks = torch.Tensor(masks)
        old_policies, old_values = net(states)
        old_policies = old_policies.view(-1, net.num_outputs).detach()
        returns, advantages = net.get_gae(old_values.view(-1).detach(),
            rewards, masks)
        batch_maker = BatchMaker(states, actions, returns, advantages,
            old_policies)
        for _ in range(epoch_k):
            for _ in range(len(states) // batch_size):
                (states_sample, actions_sample, returns_sample,
                    advantages_sample, old_policies_sample
                    ) = batch_maker.sample()
                policies, values = net(states_sample)
                values = values.view(-1)
                policies = policies.view(-1, net.num_outputs)
                ratios = (policies / old_policies_sample * actions_sample.
                    detach()).sum(dim=1)
                clipped_ratios = torch.clamp(ratios, min=1.0 -
                    epsilon_clip, max=1.0 + epsilon_clip)
                actor_loss = -torch.min(ratios * advantages_sample,
                    clipped_ratios * advantages_sample).sum()
                critic_loss = (returns_sample.detach() - values).pow(2).sum()
                policy_entropy = (torch.log(policies) * policies).sum(1,
                    keepdim=True).mean()
                loss = (actor_loss + critic_coefficient * critic_loss -
                    entropy_coefficient * policy_entropy)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        return loss

    def get_action(self, input):
        policy, _ = self.forward(input)
        policy = policy[0].data.numpy()
        action = np.random.choice(self.num_outputs, 1, p=policy)[0]
        return action

    def forward(self, input_0):
        primals_1 = self.fc.weight
        primals_2 = self.fc.bias
        primals_4 = self.fc_actor.weight
        primals_5 = self.fc_actor.bias
        primals_6 = self.fc_critic.weight
        primals_7 = self.fc_critic.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1]
g6ling/Pytorch-Cartpole
PPO
false
15,385
[ "MIT" ]
116
ecb7b622cfefe825ac95388cceb6752413d90a2a
https://github.com/g6ling/Pytorch-Cartpole/tree/ecb7b622cfefe825ac95388cceb6752413d90a2a
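A minimal rollout-style sketch for the eager PPO network above, not part of the original record. It assumes discrete actions; get_action expects a single CPU state of shape (1, num_inputs) so that policy[0] is a valid probability vector. train_model additionally relies on the module-level hyperparameters flagged in the comments above, which this sketch does not exercise.

import torch

net = PPO(num_inputs=4, num_outputs=4)
state = torch.rand(1, 4)          # one observation, batch dimension first
policy, value = net(state)
print(policy.shape, value.shape)  # torch.Size([1, 4]) torch.Size([1, 1])
print(policy.sum(dim=-1))         # rows sum to 1 (softmax)
action = net.get_action(state)    # samples an action index in [0, 4)
print(action)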
BCEDiceLoss
import torch
import torch.nn as nn
import torch.nn.functional as F


class DiceLoss(nn.Module):
    """Sørensen–Dice coefficient loss to calculate the mean loss over a
    batch of data. This loss mainly calculates the similarity between two
    samples.

    To know more about this loss check this link:
    https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
    """

    def __init__(self):
        """Simple constructor for the class."""
        super(DiceLoss, self).__init__()

    def forward(self, predicted, target):
        """Method for calculation of loss from sample.

        Parameters:
            predicted(torch.Tensor): Predicted output of the network.
                Shape - (Batch Size, Channel, Height, Width)
            target(torch.Tensor): Actual required output for the network.
                Shape - (Batch Size, Channel, Height, Width)

        Returns:
            The mean Dice loss over the batch size.
        """
        batch = predicted.size()[0]
        batch_loss = 0
        for index in range(batch):
            coefficient = self._dice_coefficient(predicted[index], target[
                index])
            batch_loss += coefficient
        batch_loss = batch_loss / batch
        return 1 - batch_loss

    def _dice_coefficient(self, predicted, target):
        """Calculates the Sørensen–Dice coefficient for a single sample.

        Parameters:
            predicted(torch.Tensor): Predicted single output of the network.
                Shape - (Channel, Height, Width)
            target(torch.Tensor): Actual required single output for the
                network. Shape - (Channel, Height, Width)

        Returns:
            coefficient(torch.Tensor): Dice coefficient for the input
                sample. 1 represents high similarity and 0 represents low
                similarity.
        """
        smooth = 1
        product = torch.mul(predicted, target)
        intersection = product.sum()
        coefficient = (2 * intersection + smooth) / (predicted.sum() +
            target.sum() + smooth)
        return coefficient


class BCEDiceLoss(nn.Module):
    """Combination of Binary Cross Entropy Loss and Soft Dice Loss.

    This combined loss is used to train the network so that both benefits
    of the losses are leveraged.
    """

    def __init__(self, device):
        """Simple constructor for the class. The device argument is
        accepted for API compatibility but is not used by this loss."""
        super(BCEDiceLoss, self).__init__()
        self.dice_loss = DiceLoss()

    def forward(self, predicted, target):
        """Method for calculation of combined loss from sample."""
        return F.binary_cross_entropy(predicted, target) + self.dice_loss(
            predicted, target)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'device': 0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_0(in_ptr0, in_ptr1, out_ptr0,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = -tmp3
    tmp5 = libdevice.log1p(tmp4)
    tmp6 = -100.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp2 * tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = triton_helpers.maximum(tmp9, tmp6)
    tmp11 = tmp0 * tmp10
    tmp12 = tmp8 - tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)


@triton.jit
def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1(in_out_ptr1,
    in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp12 = tl.load(in_ptr0 + (64 + r0), None)
    tmp13 = tl.load(in_ptr1 + (64 + r0), None)
    tmp24 = tl.load(in_ptr0 + (128 + r0), None)
    tmp25 = tl.load(in_ptr1 + (128 + r0), None)
    tmp36 = tl.load(in_ptr0 + (192 + r0), None)
    tmp37 = tl.load(in_ptr1 + (192 + r0), None)
    tmp75 = tl.load(in_out_ptr1 + 0)
    tmp76 = tl.broadcast_to(tmp75, [XBLOCK, 1])
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]
    tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]
    tmp14 = tmp12 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.sum(tmp15, 1)[:, None]
    tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp20 = tl.sum(tmp18, 1)[:, None]
    tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp23 = tl.sum(tmp21, 1)[:, None]
    tmp26 = tmp24 * tmp25
    tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
    tmp29 = tl.sum(tmp27, 1)[:, None]
    tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp32 = tl.sum(tmp30, 1)[:, None]
    tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp35 = tl.sum(tmp33, 1)[:, None]
    tmp38 = tmp36 * tmp37
    tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
    tmp41 = tl.sum(tmp39, 1)[:, None]
    tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
    tmp44 = tl.sum(tmp42, 1)[:, None]
    tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
    tmp47 = tl.sum(tmp45, 1)[:, None]
    tmp48 = 2.0
    tmp49 = tmp5 * tmp48
    tmp50 = 1.0
    tmp51 = tmp49 + tmp50
    tmp52 = tmp8 + tmp11
    tmp53 = tmp52 + tmp50
    tmp54 = tmp51 / tmp53
    tmp55 = 0.0
    tmp56 = tmp54 + tmp55
    tmp57 = tmp17 * tmp48
    tmp58 = tmp57 + tmp50
    tmp59 = tmp20 + tmp23
    tmp60 = tmp59 + tmp50
    tmp61 = tmp58 / tmp60
    tmp62 = tmp56 + tmp61
    tmp63 = tmp29 * tmp48
    tmp64 = tmp63 + tmp50
    tmp65 = tmp32 + tmp35
    tmp66 = tmp65 + tmp50
    tmp67 = tmp64 / tmp66
    tmp68 = tmp62 + tmp67
    tmp69 = tmp41 * tmp48
    tmp70 = tmp69 + tmp50
    tmp71 = tmp44 + tmp47
    tmp72 = tmp71 + tmp50
    tmp73 = tmp70 / tmp72
    tmp74 = tmp68 + tmp73
    tmp77 = 256.0
    tmp78 = tmp76 / tmp77
    tmp79 = 0.25
    tmp80 = tmp74 * tmp79
    tmp81 = tmp50 - tmp80
    tmp82 = tmp78 + tmp81
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp82, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_0[grid(1)](arg0_1, arg1_1,
            buf0, 1, 256, num_warps=2, num_stages=1)
        buf14 = buf0
        del buf0
        triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1[grid(1)](
            buf14, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf14,


class DiceLoss(nn.Module):
    """Sørensen–Dice coefficient loss to calculate the mean loss over a
    batch of data. This loss mainly calculates the similarity between two
    samples.

    To know more about this loss check this link:
    https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
    """

    def __init__(self):
        """Simple constructor for the class."""
        super(DiceLoss, self).__init__()

    def forward(self, predicted, target):
        """Method for calculation of loss from sample.

        Parameters:
            predicted(torch.Tensor): Predicted output of the network.
                Shape - (Batch Size, Channel, Height, Width)
            target(torch.Tensor): Actual required output for the network.
                Shape - (Batch Size, Channel, Height, Width)

        Returns:
            The mean Dice loss over the batch size.
        """
        batch = predicted.size()[0]
        batch_loss = 0
        for index in range(batch):
            coefficient = self._dice_coefficient(predicted[index], target[
                index])
            batch_loss += coefficient
        batch_loss = batch_loss / batch
        return 1 - batch_loss

    def _dice_coefficient(self, predicted, target):
        """Calculates the Sørensen–Dice coefficient for a single sample.

        Parameters:
            predicted(torch.Tensor): Predicted single output of the network.
                Shape - (Channel, Height, Width)
            target(torch.Tensor): Actual required single output for the
                network. Shape - (Channel, Height, Width)

        Returns:
            coefficient(torch.Tensor): Dice coefficient for the input
                sample. 1 represents high similarity and 0 represents low
                similarity.
        """
        smooth = 1
        product = torch.mul(predicted, target)
        intersection = product.sum()
        coefficient = (2 * intersection + smooth) / (predicted.sum() +
            target.sum() + smooth)
        return coefficient


class BCEDiceLossNew(nn.Module):
    """Combination of Binary Cross Entropy Loss and Soft Dice Loss.

    This combined loss is used to train the network so that both benefits
    of the losses are leveraged.
    """

    def __init__(self, device):
        """Simple constructor for the class. The device argument is
        accepted for API compatibility but is not used by this loss."""
        super(BCEDiceLossNew, self).__init__()
        self.dice_loss = DiceLoss()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
g-freire/Brain-Tumor-Segmentation
BCEDiceLoss
false
15,386
[ "MIT" ]
156
e4f258feb64c11815570e295c58bda78afd21ab9
https://github.com/g-freire/Brain-Tumor-Segmentation/tree/e4f258feb64c11815570e295c58bda78afd21ab9
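A short CPU check of the eager BCEDiceLoss above; this sketch is not part of the original record. binary_cross_entropy requires predictions already in (0, 1), so random logits are passed through a sigmoid first; the device argument is unused by the module (see the docstring note above), so any placeholder works.

import torch

criterion = BCEDiceLoss(device='cpu')  # device is accepted but unused
predicted = torch.sigmoid(torch.randn(4, 4, 4, 4))  # probabilities in (0, 1)
target = (torch.rand(4, 4, 4, 4) > 0.5).float()     # binary ground truth
loss = criterion(predicted, target)
print(float(loss))  # BCE term plus (1 - mean Dice coefficient)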
TNPG
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F

# gamma and lr are expected to be module-level hyperparameters defined
# elsewhere in the training script.


def flat_grad(grads):
    grad_flatten = []
    for grad in grads:
        grad_flatten.append(grad.view(-1))
    grad_flatten = torch.cat(grad_flatten)
    return grad_flatten


def flat_hessian(hessians):
    hessians_flatten = []
    for hessian in hessians:
        hessians_flatten.append(hessian.contiguous().view(-1))
    hessians_flatten = torch.cat(hessians_flatten).data
    return hessians_flatten


def kl_divergence(policy, old_policy):
    kl = old_policy * torch.log(old_policy / policy)
    kl = kl.sum(1, keepdim=True)
    return kl


def fisher_vector_product(net, states, p, cg_damp=0.1):
    policy = net(states)
    old_policy = net(states).detach()
    kl = kl_divergence(policy, old_policy)
    kl = kl.mean()
    kl_grad = torch.autograd.grad(kl, net.parameters(), create_graph=True)
    kl_grad = flat_grad(kl_grad)
    kl_grad_p = (kl_grad * p.detach()).sum()
    kl_hessian_p = torch.autograd.grad(kl_grad_p, net.parameters())
    kl_hessian_p = flat_hessian(kl_hessian_p)
    return kl_hessian_p + cg_damp * p.detach()


def conjugate_gradient(net, states, loss_grad, n_step=10, residual_tol=1e-10):
    x = torch.zeros(loss_grad.size())
    r = loss_grad.clone()
    p = loss_grad.clone()
    r_dot_r = torch.dot(r, r)
    for i in range(n_step):
        A_dot_p = fisher_vector_product(net, states, p)
        alpha = r_dot_r / torch.dot(p, A_dot_p)
        x += alpha * p
        r -= alpha * A_dot_p
        new_r_dot_r = torch.dot(r, r)
        beta = new_r_dot_r / r_dot_r
        p = r + beta * p
        r_dot_r = new_r_dot_r
        if r_dot_r < residual_tol:
            break
    return x


def flat_params(model):
    params = []
    for param in model.parameters():
        params.append(param.data.view(-1))
    params_flatten = torch.cat(params)
    return params_flatten


def update_model(model, new_params):
    index = 0
    for params in model.parameters():
        params_length = len(params.view(-1))
        new_param = new_params[index:index + params_length]
        new_param = new_param.view(params.size())
        params.data.copy_(new_param)
        index += params_length


class TNPG(nn.Module):

    def __init__(self, num_inputs, num_outputs):
        super(TNPG, self).__init__()
        self.t = 0
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.fc_1 = nn.Linear(num_inputs, 128)
        self.fc_2 = nn.Linear(128, num_outputs)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)

    def forward(self, input):
        x = torch.tanh(self.fc_1(input))
        policy = F.softmax(self.fc_2(x))
        return policy

    @classmethod
    def train_model(cls, net, transitions):
        states, actions, rewards, masks = (transitions.state, transitions.
            action, transitions.reward, transitions.mask)
        states = torch.stack(states)
        actions = torch.stack(actions)
        rewards = torch.Tensor(rewards)
        masks = torch.Tensor(masks)
        returns = torch.zeros_like(rewards)
        running_return = 0
        for t in reversed(range(len(rewards))):
            running_return = rewards[t] + gamma * running_return * masks[t]
            returns[t] = running_return
        policies = net(states)
        policies = policies.view(-1, net.num_outputs)
        policy_actions = (policies * actions.detach()).sum(dim=1)
        loss = (policy_actions * returns).mean()
        loss_grad = torch.autograd.grad(loss, net.parameters())
        loss_grad = flat_grad(loss_grad)
        step_dir = conjugate_gradient(net, states, loss_grad.data)
        params = flat_params(net)
        new_params = params + lr * step_dir
        update_model(net, new_params)
        return -loss

    def get_action(self, input):
        policy = self.forward(input)
        policy = policy[0].data.numpy()
        action = np.random.choice(self.num_outputs, 1, p=policy)[0]
        return action


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_inputs': 4, 'num_outputs': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

# gamma and lr are expected to be module-level hyperparameters defined
# elsewhere in the training script.


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, None)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (128, 4), (4, 1))
    assert_size_stride(primals_2, (128,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 128), (128, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(8192)](buf1, primals_2, 8192, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
            (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
            0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf3
    return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf1, buf4, primals_4


def flat_grad(grads):
    grad_flatten = []
    for grad in grads:
        grad_flatten.append(grad.view(-1))
    grad_flatten = torch.cat(grad_flatten)
    return grad_flatten


def flat_hessian(hessians):
    hessians_flatten = []
    for hessian in hessians:
        hessians_flatten.append(hessian.contiguous().view(-1))
    hessians_flatten = torch.cat(hessians_flatten).data
    return hessians_flatten


def kl_divergence(policy, old_policy):
    kl = old_policy * torch.log(old_policy / policy)
    kl = kl.sum(1, keepdim=True)
    return kl


def fisher_vector_product(net, states, p, cg_damp=0.1):
    policy = net(states)
    old_policy = net(states).detach()
    kl = kl_divergence(policy, old_policy)
    kl = kl.mean()
    kl_grad = torch.autograd.grad(kl, net.parameters(), create_graph=True)
    kl_grad = flat_grad(kl_grad)
    kl_grad_p = (kl_grad * p.detach()).sum()
    kl_hessian_p = torch.autograd.grad(kl_grad_p, net.parameters())
    kl_hessian_p = flat_hessian(kl_hessian_p)
    return kl_hessian_p + cg_damp * p.detach()


def conjugate_gradient(net, states, loss_grad, n_step=10, residual_tol=1e-10):
    x = torch.zeros(loss_grad.size())
    r = loss_grad.clone()
    p = loss_grad.clone()
    r_dot_r = torch.dot(r, r)
    for i in range(n_step):
        A_dot_p = fisher_vector_product(net, states, p)
        alpha = r_dot_r / torch.dot(p, A_dot_p)
        x += alpha * p
        r -= alpha * A_dot_p
        new_r_dot_r = torch.dot(r, r)
        beta = new_r_dot_r / r_dot_r
        p = r + beta * p
        r_dot_r = new_r_dot_r
        if r_dot_r < residual_tol:
            break
    return x


def flat_params(model):
    params = []
    for param in model.parameters():
        params.append(param.data.view(-1))
    params_flatten = torch.cat(params)
    return params_flatten


def update_model(model, new_params):
    index = 0
    for params in model.parameters():
        params_length = len(params.view(-1))
        new_param = new_params[index:index + params_length]
        new_param = new_param.view(params.size())
        params.data.copy_(new_param)
        index += params_length


class TNPGNew(nn.Module):

    def __init__(self, num_inputs, num_outputs):
        super(TNPGNew, self).__init__()
        self.t = 0
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.fc_1 = nn.Linear(num_inputs, 128)
        self.fc_2 = nn.Linear(128, num_outputs)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)

    @classmethod
    def train_model(cls, net, transitions):
        states, actions, rewards, masks = (transitions.state, transitions.
            action, transitions.reward, transitions.mask)
        states = torch.stack(states)
        actions = torch.stack(actions)
        rewards = torch.Tensor(rewards)
        masks = torch.Tensor(masks)
        returns = torch.zeros_like(rewards)
        running_return = 0
        for t in reversed(range(len(rewards))):
            running_return = rewards[t] + gamma * running_return * masks[t]
            returns[t] = running_return
        policies = net(states)
        policies = policies.view(-1, net.num_outputs)
        policy_actions = (policies * actions.detach()).sum(dim=1)
        loss = (policy_actions * returns).mean()
        loss_grad = torch.autograd.grad(loss, net.parameters())
        loss_grad = flat_grad(loss_grad)
        step_dir = conjugate_gradient(net, states, loss_grad.data)
        params = flat_params(net)
        new_params = params + lr * step_dir
        update_model(net, new_params)
        return -loss

    def get_action(self, input):
        policy = self.forward(input)
        policy = policy[0].data.numpy()
        action = np.random.choice(self.num_outputs, 1, p=policy)[0]
        return action

    def forward(self, input_0):
        primals_1 = self.fc_1.weight
        primals_2 = self.fc_1.bias
        primals_4 = self.fc_2.weight
        primals_5 = self.fc_2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
g6ling/Pytorch-Cartpole
TNPG
false
15,387
[ "MIT" ]
116
ecb7b622cfefe825ac95388cceb6752413d90a2a
https://github.com/g6ling/Pytorch-Cartpole/tree/ecb7b622cfefe825ac95388cceb6752413d90a2a
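Finally, a forward-pass sketch for the eager TNPG policy above; it is not part of the original record. As with PPO, get_action expects a single CPU state of shape (1, num_inputs) so that policy[0] is a probability vector (note that F.softmax without an explicit dim emits a deprecation warning); the natural-gradient update in train_model also needs the module-level gamma and lr constants flagged in the comments.

import torch

net = TNPG(num_inputs=4, num_outputs=4)
state = torch.rand(1, 4)
policy = net(state)            # softmax over the 4 discrete actions
print(policy.shape)            # torch.Size([1, 4])
action = net.get_action(state)
print(action)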