Dataset schema (eleven fields per row, in the order they appear below):

  entry_point                   string   lengths 1 - 65
  original_triton_python_code   string   lengths 208 - 619k
  optimised_triton_code         string   lengths 1.15k - 275k
  repo_name                     string   lengths 7 - 115
  module_name                   string   lengths 1 - 65
  synthetic                     bool     1 class
  uuid                          int64    values 0 - 18.5k
  licenses                      list     lengths 1 - 6
  stars                         int64    values 0 - 19.8k
  sha                           string   length 40
  repo_link                     string   lengths 72 - 180
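The rows that follow use this schema, one field per line. As a minimal sketch of how such a dump could be loaded with the Hugging Face datasets library (the dataset id below is a placeholder, not the real repository name):

from datasets import load_dataset

# Placeholder id: substitute the actual dataset repository.
ds = load_dataset("example/pytorch-triton-pairs", split="train")

row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])
# "original_triton_python_code" holds the reference PyTorch module;
# "optimised_triton_code" holds the Inductor-generated Triton version.
print(row["optimised_triton_code"][:200])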
LayerNorm
import torch import torch.nn as nn from torch.nn import Parameter from torch.nn.parameter import Parameter from torch.nn.modules.normalization import LayerNorm from torch.optim.lr_scheduler import * class LayerNorm(nn.Module): def __init__(self, hidden_size, eps=0.0001): super(LayerNorm, self).__init__() self.alpha = Parameter(torch.ones(1, 1, hidden_size)) self.beta = Parameter(torch.zeros(1, 1, hidden_size)) self.eps = eps def forward(self, x): """ Args: :param x: batch * len * input_size Returns: normalized x """ mu = torch.mean(x, 2, keepdim=True).expand_as(x) sigma = torch.std(x, 2, keepdim=True).expand_as(x) return (x - mu) / (sigma + self.eps) * self.alpha.expand_as(x ) + self.beta.expand_as(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter from torch.nn.parameter import Parameter from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 0.0001 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x3, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(nn.Module): def __init__(self, hidden_size, eps=0.0001): super(LayerNormNew, self).__init__() self.alpha = Parameter(torch.ones(1, 1, hidden_size)) self.beta = Parameter(torch.zeros(1, 1, hidden_size)) self.eps = eps def forward(self, input_0): primals_2 = self.alpha primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
anlewy/mt-dnn
LayerNorm
false
14,879
[ "MIT" ]
2,075
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
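Each row pairs a reference nn.Module with an Inductor-generated replacement whose class name carries a "New" suffix and whose forward routes through the generated call(). A minimal sketch of checking one pair on a CUDA machine, assuming a decoded row dict as in the loading sketch above (the variable names here are illustrative, and the code fields should be trusted before exec'ing them):

import torch

ref_ns, opt_ns = {}, {}
exec(row["original_triton_python_code"], ref_ns)  # reference PyTorch module
exec(row["optimised_triton_code"], opt_ns)        # Inductor/Triton version

init_args, init_kwargs = ref_ns["get_init_inputs"]()
name = row["entry_point"]                          # e.g. "LayerNorm"
ref = ref_ns[name](*init_args, **init_kwargs).cuda()
opt = opt_ns[name + "New"](*init_args, **init_kwargs).cuda()
opt.load_state_dict(ref.state_dict())  # sync any randomly initialised weights

inputs = [t.cuda() for t in ref_ns["get_inputs"]()]
torch.testing.assert_close(ref(*inputs), opt(*inputs), rtol=1e-4, atol=1e-4)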
KDLoss
import torch import torch.nn as nn import torch.nn.functional as F class KDLoss(nn.Module): def __init__(self, temp: 'float', reduction: 'str'): super(KDLoss, self).__init__() self.temp = temp self.reduction = reduction self.kl_loss = nn.KLDivLoss(reduction=reduction) def forward(self, teacher_logits: 'torch.Tensor', student_logits: 'torch.Tensor'): student_softmax = F.log_softmax(student_logits / self.temp, dim=-1) teacher_softmax = F.softmax(teacher_logits / self.temp, dim=-1) kl = nn.KLDivLoss(reduction='none')(student_softmax, teacher_softmax) kl = kl.sum() if self.reduction == 'sum' else kl.sum(1).mean() kl = kl * self.temp ** 2 return kl def __call__(self, *args, **kwargs): return super(KDLoss, self).__call__(*args, **kwargs) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'temp': 4, 'reduction': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_sub_sum_xlogy_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex // 16 r4 = rindex % 16 r1 = rindex // 4 % 4 tmp0 = tl.load(in_ptr0 + (r4 + 64 * r2), None) tmp9 = tl.load(in_ptr1 + (r4 + 64 * r2), None) tmp10 = tl.load(in_ptr1 + (4 * r1 + 64 * r2), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (1 + 4 * r1 + 64 * r2), None, eviction_policy ='evict_last') tmp15 = tl.load(in_ptr1 + (2 + 4 * r1 + 64 * r2), None, eviction_policy ='evict_last') tmp18 = tl.load(in_ptr1 + (3 + 4 * r1 + 64 * r2), None, eviction_policy ='evict_last') tmp25 = tl.load(in_ptr0 + (16 + r4 + 64 * r2), None) tmp32 = tl.load(in_ptr1 + (16 + r4 + 64 * r2), None) tmp33 = tl.load(in_ptr1 + (16 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr1 + (17 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr1 + (18 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp41 = tl.load(in_ptr1 + (19 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp49 = tl.load(in_ptr0 + (32 + r4 + 64 * r2), None) tmp56 = tl.load(in_ptr1 + (32 + r4 + 64 * r2), None) tmp57 = tl.load(in_ptr1 + (32 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr1 + (33 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr1 + (34 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp65 = tl.load(in_ptr1 + (35 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp73 = tl.load(in_ptr0 + (48 + r4 + 64 * r2), None) tmp80 = tl.load(in_ptr1 + (48 + r4 + 64 * r2), None) tmp81 = tl.load(in_ptr1 + (48 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp83 = tl.load(in_ptr1 + (49 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp86 = tl.load(in_ptr1 + (50 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp89 = tl.load(in_ptr1 + (51 + 4 * r1 + 64 * r2), None, eviction_policy='evict_last') tmp1 = libdevice.isnan(tmp0).to(tl.int1) tmp2 = 0.0 tmp3 = tmp0 == tmp2 tmp4 = tl_math.log(tmp0) tmp5 = tmp0 * tmp4 tmp6 = tl.where(tmp3, tmp2, tmp5) tmp7 = float('nan') tmp8 = tl.where(tmp1, tmp7, tmp6) tmp11 = tl_math.exp(tmp10) tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tl_math.log(tmp20) tmp22 = tmp9 - tmp21 tmp23 = tmp0 * tmp22 tmp24 = tmp8 - tmp23 tmp26 = libdevice.isnan(tmp25).to(tl.int1) tmp27 = tmp25 == tmp2 tmp28 = tl_math.log(tmp25) tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp2, tmp29) tmp31 = tl.where(tmp26, tmp7, tmp30) tmp34 = tl_math.exp(tmp33) tmp36 = tl_math.exp(tmp35) tmp37 = tmp34 + tmp36 tmp39 = tl_math.exp(tmp38) tmp40 = tmp37 + tmp39 tmp42 = tl_math.exp(tmp41) tmp43 = tmp40 + tmp42 tmp44 = tl_math.log(tmp43) tmp45 = tmp32 - tmp44 tmp46 = tmp25 * tmp45 tmp47 = tmp31 - tmp46 tmp48 = tmp24 + tmp47 tmp50 = libdevice.isnan(tmp49).to(tl.int1) tmp51 = tmp49 == tmp2 tmp52 = tl_math.log(tmp49) tmp53 = tmp49 * tmp52 tmp54 = tl.where(tmp51, tmp2, tmp53) tmp55 = tl.where(tmp50, tmp7, tmp54) tmp58 = tl_math.exp(tmp57) tmp60 = tl_math.exp(tmp59) tmp61 = tmp58 + tmp60 tmp63 = tl_math.exp(tmp62) tmp64 = tmp61 + tmp63 tmp66 = tl_math.exp(tmp65) tmp67 = tmp64 + tmp66 tmp68 = tl_math.log(tmp67) tmp69 = tmp56 - tmp68 tmp70 = tmp49 * tmp69 tmp71 = tmp55 - tmp70 tmp72 = tmp48 + tmp71 tmp74 = libdevice.isnan(tmp73).to(tl.int1) tmp75 = tmp73 == tmp2 tmp76 = tl_math.log(tmp73) tmp77 = tmp73 * tmp76 tmp78 = tl.where(tmp75, tmp2, tmp77) tmp79 = tl.where(tmp74, tmp7, tmp78) tmp82 = tl_math.exp(tmp81) tmp84 = tl_math.exp(tmp83) tmp85 = tmp82 + tmp84 tmp87 = tl_math.exp(tmp86) tmp88 = tmp85 + tmp87 tmp90 = tl_math.exp(tmp89) tmp91 = tmp88 + tmp90 tmp92 = tl_math.log(tmp91) tmp93 = tmp80 - tmp92 tmp94 = tmp73 * tmp93 tmp95 = tmp79 - tmp94 tmp96 = tmp72 + tmp95 tmp97 = tl.broadcast_to(tmp96, [XBLOCK, RBLOCK]) tmp99 = tl.sum(tmp97, 1)[:, None] tmp100 = 64.0 tmp101 = tmp99 / tmp100 tmp102 = 16.0 tmp103 = tmp101 * tmp102 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp103, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused_2[grid(256)](arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 triton_per_fused__log_softmax_mean_mul_sub_sum_xlogy_3[grid(1)](buf5, buf1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf2 return buf5, class KDLossNew(nn.Module): def __init__(self, temp: 'float', reduction: 'str'): super(KDLossNew, self).__init__() self.temp = temp self.reduction = reduction self.kl_loss = nn.KLDivLoss(reduction=reduction) def __call__(self, *args, **kwargs): return super(KDLossNew, self).__call__(*args, **kwargs) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
angpo/VKD
KDLoss
false
14,880
[ "MIT" ]
68
2a136e00dad4c73612d6efe087675604ac2416eb
https://github.com/angpo/VKD/tree/2a136e00dad4c73612d6efe087675604ac2416eb
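For readability, the distillation loss this row implements can be restated in eager PyTorch (a sketch mirroring the flattened forward above, not code taken from the dataset): both logit sets are softened by a temperature T before the KL divergence, and the result is rescaled by T**2 so gradient magnitudes stay comparable across temperatures.

import torch
import torch.nn.functional as F

def kd_loss(teacher_logits, student_logits, temp=4.0, reduction="mean"):
    # KL(softmax(teacher/T) || softmax(student/T)), rescaled by T**2.
    log_p_student = F.log_softmax(student_logits / temp, dim=-1)
    p_teacher = F.softmax(teacher_logits / temp, dim=-1)
    kl = F.kl_div(log_p_student, p_teacher, reduction="none")
    kl = kl.sum() if reduction == "sum" else kl.sum(1).mean()
    return kl * temp ** 2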
Correct
import torch from torch import nn import torch.utils.data.distributed class Correct(nn.Module): def forward(self, classifier, target): return classifier.max(dim=1)[1] == target def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x2, tmp46, xmask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 == tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(256)](buf0, arg1_1, buf1, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg1_1 del buf0 return buf1, class CorrectNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
aoranwu/grace
Correct
false
14,881
[ "BSD-2-Clause" ]
88
1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e
https://github.com/aoranwu/grace/tree/1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e
Pooler
import torch import torch.nn.functional as F import torch.nn as nn from torch.optim.lr_scheduler import * def linear(x): return x def activation(func_a): """Activation function wrapper """ try: f = eval(func_a) except: f = linear return f class DropoutWrapper(nn.Module): """ This is a dropout wrapper which supports the fix mask dropout """ def __init__(self, dropout_p=0, enable_vbp=True): super(DropoutWrapper, self).__init__() """variational dropout means fix dropout mask ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11 """ self.enable_variational_dropout = enable_vbp self.dropout_p = dropout_p def forward(self, x): """ :param x: batch * len * input_size """ if self.training is False or self.dropout_p == 0: return x if len(x.size()) == 3: mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self. dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1)) mask.requires_grad = False return mask.unsqueeze(1).expand_as(x) * x else: return F.dropout(x, p=self.dropout_p, training=self.training) class Pooler(nn.Module): def __init__(self, hidden_size, dropout_p=0.1, actf='tanh'): super(Pooler, self).__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.activation = activation(actf) self.dropout = DropoutWrapper(dropout_p=dropout_p) def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] first_token_tensor = self.dropout(first_token_tensor) pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0) def linear(x): return x def activation(func_a): """Activation function wrapper """ try: f = eval(func_a) except: f = linear return f class DropoutWrapper(nn.Module): """ This is a dropout wrapper which supports the fix mask dropout """ def __init__(self, dropout_p=0, enable_vbp=True): super(DropoutWrapper, self).__init__() """variational dropout means fix dropout mask ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11 """ self.enable_variational_dropout = enable_vbp self.dropout_p = dropout_p def forward(self, x): """ :param x: batch * len * input_size """ if self.training is False or self.dropout_p == 0: return x if len(x.size()) == 3: mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self. dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1)) mask.requires_grad = False return mask.unsqueeze(1).expand_as(x) * x else: return F.dropout(x, p=self.dropout_p, training=self.training) class PoolerNew(nn.Module): def __init__(self, hidden_size, dropout_p=0.1, actf='tanh'): super(PoolerNew, self).__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.activation = activation(actf) self.dropout = DropoutWrapper(dropout_p=dropout_p) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
anlewy/mt-dnn
Pooler
false
14,882
[ "MIT" ]
2,075
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
https://github.com/anlewy/mt-dnn/tree/eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
Conv2dTime
import torch import torch.nn as nn class Conv2dTime(nn.Conv2d): """ Implements time dependent 2d convolutions, by appending the time variable as an extra channel. """ def __init__(self, in_channels, *args, **kwargs): super(Conv2dTime, self).__init__(in_channels + 1, *args, **kwargs) def forward(self, t, x): t_img = torch.ones_like(x[:, :1, :, :]) * t t_and_x = torch.cat([t_img, x], 1) return super(Conv2dTime, self).forward(t_and_x) def get_inputs(): return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 5 x0 = xindex % 16 x2 = xindex // 80 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(320)](primals_2, primals_1, buf0, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 return buf2, primals_3, buf0 class Conv2dTimeNew(nn.Conv2d): """ Implements time dependent 2d convolutions, by appending the time variable as an extra channel. """ def __init__(self, in_channels, *args, **kwargs): super(Conv2dTimeNew, self).__init__(in_channels + 1, *args, **kwargs) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
anway/augmented-neural-odes
Conv2dTime
false
14,883
[ "MIT" ]
449
561cfa540ef292d117ba9cf083758281774f3f22
https://github.com/anway/augmented-neural-odes/tree/561cfa540ef292d117ba9cf083758281774f3f22
MaskedHuberLoss
import torch import torch.nn as nn class MaskedHuberLoss(torch.nn.Module): def __init__(self): super(MaskedHuberLoss, self).__init__() def forward(self, output, labels, mask): lossHuber = nn.SmoothL1Loss(reduction='none') l = lossHuber(output * mask, labels * mask) l = l.sum(dim=(1, 2)) mask = mask.sum(dim=(1, 2)) l = l / mask return l.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_smooth_l1_loss_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp3 = tl.load(in_ptr2 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = 1.0 tmp8 = tmp6 < tmp7 tmp9 = tmp6 * tmp6 tmp10 = 0.5 tmp11 = tmp9 * tmp10 tmp12 = tmp11 * tmp7 tmp13 = tmp6 - tmp10 tmp14 = tl.where(tmp8, tmp12, tmp13) tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp21 = tl.where(xmask, tmp19, 0) tmp22 = tl.sum(tmp21, 1)[:, None] tl.store(out_ptr0 + x3, tmp18, xmask) tl.store(out_ptr1 + x3, tmp22, xmask) @triton.jit def triton_per_fused_div_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 / tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_mul_smooth_l1_loss_sum_0[grid(16)](arg0_1, arg1_1, arg2_1, buf0, buf1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_div_mean_1[grid(1)](buf3, buf0, buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, class MaskedHuberLossNew(torch.nn.Module): def __init__(self): super(MaskedHuberLossNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
anshulpaigwar/GndNet
MaskedHuberLoss
false
14,884
[ "MIT" ]
73
24328602a8cbaeabe67cafbf1b96c35f5c5c9023
https://github.com/anshulpaigwar/GndNet/tree/24328602a8cbaeabe67cafbf1b96c35f5c5c9023
Lambda3
import torch from typing import Tuple from torch import nn from abc import ABC from abc import abstractmethod class Regularizer(nn.Module, ABC): @abstractmethod def forward(self, factors: 'Tuple[torch.Tensor]'): pass class Lambda3(Regularizer): def __init__(self, weight: 'float'): super(Lambda3, self).__init__() self.weight = weight def forward(self, factor): ddiff = factor[1:] - factor[:-1] rank = int(ddiff.shape[1] / 2) diff = torch.sqrt(ddiff[:, :rank] ** 2 + ddiff[:, rank:] ** 2) ** 3 return self.weight * torch.sum(diff) / (factor.shape[0] - 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'weight': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from typing import Tuple from torch import nn from abc import ABC from abc import abstractmethod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_pow_sqrt_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 96 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 32 r1 = rindex // 32 tmp0 = tl.load(in_ptr0 + (64 + r0 + 64 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r1), rmask, other=0.0) tmp4 = tl.load(in_ptr0 + (96 + r0 + 64 * r1), rmask, other=0.0) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp10 * tmp9 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = 4.0 tmp17 = tmp15 * tmp16 tmp18 = 0.3333333333333333 tmp19 = tmp17 * tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_pow_sqrt_sum_0[grid(1)](buf1, arg0_1, 1, 96, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class Regularizer(nn.Module, ABC): @abstractmethod def forward(self, factors: 'Tuple[torch.Tensor]'): pass class Lambda3New(Regularizer): def __init__(self, weight: 'float'): super(Lambda3New, self).__init__() self.weight = weight def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
apoorvumang/Temporal_KGQA
Lambda3
false
14,885
[ "MIT" ]
49
3e2a7c31865235ee2511a7ae0ea0701c12896327
https://github.com/apoorvumang/Temporal_KGQA/tree/3e2a7c31865235ee2511a7ae0ea0701c12896327
N3
import torch from typing import Tuple from torch import nn from abc import ABC from abc import abstractmethod class Regularizer(nn.Module, ABC): @abstractmethod def forward(self, factors: 'Tuple[torch.Tensor]'): pass class N3(Regularizer): def __init__(self, weight: 'float'): super(N3, self).__init__() self.weight = weight def forward(self, factors): norm = 0 for f in factors: norm += self.weight * torch.sum(torch.abs(f) ** 3) return norm / factors[0].shape[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'weight': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from typing import Tuple from torch import nn from abc import ABC from abc import abstractmethod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_mul_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr0 + (64 + r0), None) tmp14 = tl.load(in_ptr0 + (128 + r0), None) tmp21 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tl_math.abs(tmp0) tmp2 = tmp1 * tmp1 tmp3 = tmp2 * tmp1 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp8 = tl_math.abs(tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp9 * tmp8 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp15 = tl_math.abs(tmp14) tmp16 = tmp15 * tmp15 tmp17 = tmp16 * tmp15 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp22 = tl_math.abs(tmp21) tmp23 = tmp22 * tmp22 tmp24 = tmp23 * tmp22 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 4.0 tmp29 = tmp6 * tmp28 tmp30 = 0.0 tmp31 = tmp29 + tmp30 tmp32 = tmp13 * tmp28 tmp33 = tmp31 + tmp32 tmp34 = tmp20 * tmp28 tmp35 = tmp33 + tmp34 tmp36 = tmp27 * tmp28 tmp37 = tmp35 + tmp36 tmp38 = 0.25 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_mul_pow_sum_0[grid(1)](buf4, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf4, class Regularizer(nn.Module, ABC): @abstractmethod def forward(self, factors: 'Tuple[torch.Tensor]'): pass class N3New(Regularizer): def __init__(self, weight: 'float'): super(N3New, self).__init__() self.weight = weight def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
apoorvumang/Temporal_KGQA
N3
false
14,886
[ "MIT" ]
49
3e2a7c31865235ee2511a7ae0ea0701c12896327
https://github.com/apoorvumang/Temporal_KGQA/tree/3e2a7c31865235ee2511a7ae0ea0701c12896327
ConConv
import torch import torch.nn as nn class ConConv(nn.Module): def __init__(self, inplanes_x1, inplanes_x2, planes): super(ConConv, self).__init__() self.conv = nn.Conv2d(inplanes_x1 + inplanes_x2, planes, kernel_size=1, bias=True) def forward(self, x1, x2): x1 = torch.cat([x2, x1], dim=1) x1 = self.conv(x1) return x1 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes_x1': 4, 'inplanes_x2': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 return buf2, primals_3, buf0 class ConConvNew(nn.Module): def __init__(self, inplanes_x1, inplanes_x2, planes): super(ConConvNew, self).__init__() self.conv = nn.Conv2d(inplanes_x1 + inplanes_x2, planes, kernel_size=1, bias=True) def forward(self, input_0, input_1): primals_3 = self.conv.weight primals_4 = self.conv.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
anve96/DE_resnet_unet_hyb
ConConv
false
14,887
[ "BSD-3-Clause" ]
45
f0751854c8707cc4f228bb9d52d93635cc3584ae
https://github.com/anve96/DE_resnet_unet_hyb/tree/f0751854c8707cc4f228bb9d52d93635cc3584ae
Conv2
import math import torch import torch.nn as nn import torch.utils.data.distributed class Conv2(nn.Module): """ A convolution layer with the stride of 2. Input: x: (N, 2L+2, in_channels) numeric tensor global_cond: (N, global_cond_channels) numeric tensor Output: y: (N, L, out_channels) numeric tensor """ def __init__(self, in_channels, out_channels, global_cond_channels): super().__init__() ksz = 4 self.out_channels = out_channels if 0 < global_cond_channels: self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels, bias=False) self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2 ) wsize = 2.967 / math.sqrt(ksz * in_channels) self.conv_wide.weight.data.uniform_(-wsize, wsize) self.conv_wide.bias.data.zero_() def forward(self, x, global_cond): x1 = self.conv_wide(x.transpose(1, 2)).transpose(1, 2) if global_cond is not None: x2 = self.w_cond(global_cond).unsqueeze(1).expand(-1, x1.size(1 ), -1) else: x2 = torch.zeros_like(x1) a, b = (x1 + x2).split(self.out_channels, dim=2) return torch.sigmoid(a) * torch.tanh(b) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'global_cond_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 8 * x1), xmask) tmp6 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (4 + x0 + 8 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp12 = tmp5 * tmp11 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr2 + x2, tmp12, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (8, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 1), (8, 1, 1)) del buf0 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 8), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_tanh_1[grid(16)](buf1, primals_3, buf2, buf3, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del buf2 del primals_3 return buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf3, buf4 class Conv2New(nn.Module): """ A convolution layer with the stride of 2. Input: x: (N, 2L+2, in_channels) numeric tensor global_cond: (N, global_cond_channels) numeric tensor Output: y: (N, L, out_channels) numeric tensor """ def __init__(self, in_channels, out_channels, global_cond_channels): super().__init__() ksz = 4 self.out_channels = out_channels if 0 < global_cond_channels: self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels, bias=False) self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2 ) wsize = 2.967 / math.sqrt(ksz * in_channels) self.conv_wide.weight.data.uniform_(-wsize, wsize) self.conv_wide.bias.data.zero_() def forward(self, input_0, input_1): primals_5 = self.w_cond.weight primals_2 = self.conv_wide.weight primals_3 = self.conv_wide.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
aperquin/Extended_VQVAE
Conv2
false
14,888
[ "MIT" ]
55
46d309643c3fe3663e6fbd2fd6dd6b768341863b
https://github.com/aperquin/Extended_VQVAE/tree/46d309643c3fe3663e6fbd2fd6dd6b768341863b
ConvFunc
import torch import torch.nn as nn class ConvFunc(nn.Module): """Convolutional block, non-ODE. Parameters ---------- device : torch.device img_size : tuple of ints Tuple of (channels, height, width). num_filters : int Number of convolutional filters. augment_dim: int Number of augmentation channels to add. If 0 does not augment ODE. non_linearity : string One of 'relu' and 'softplus' """ def __init__(self, device, img_size, num_filters, augment_dim=0, non_linearity='relu'): super(ConvFunc, self).__init__() self.device = device self.augment_dim = augment_dim self.img_size = img_size self.channels, self.height, self.width = img_size self.channels += augment_dim self.num_filters = num_filters self.nfe = 0 self.conv1 = nn.Conv2d(self.channels, self.num_filters, kernel_size =1, stride=1, padding=0) self.conv2 = nn.Conv2d(self.num_filters, self.num_filters, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(self.num_filters, self.channels, kernel_size =1, stride=1, padding=0) if non_linearity == 'relu': self.non_linearity = nn.ReLU(inplace=True) elif non_linearity == 'softplus': self.non_linearity = nn.Softplus() def forward(self, x): """ Parameters ---------- x : torch.Tensor Shape (batch_size, input_dim) """ out = self.conv1(x) out = self.non_linearity(out) out = self.conv2(out) out = self.non_linearity(out) out = self.conv3(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'device': 0, 'img_size': [4, 4, 4], 'num_filters': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_1[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3 class ConvFuncNew(nn.Module): """Convolutional block, non-ODE. Parameters ---------- device : torch.device img_size : tuple of ints Tuple of (channels, height, width). num_filters : int Number of convolutional filters. augment_dim: int Number of augmentation channels to add. If 0 does not augment ODE. non_linearity : string One of 'relu' and 'softplus' """ def __init__(self, device, img_size, num_filters, augment_dim=0, non_linearity='relu'): super(ConvFuncNew, self).__init__() self.device = device self.augment_dim = augment_dim self.img_size = img_size self.channels, self.height, self.width = img_size self.channels += augment_dim self.num_filters = num_filters self.nfe = 0 self.conv1 = nn.Conv2d(self.channels, self.num_filters, kernel_size =1, stride=1, padding=0) self.conv2 = nn.Conv2d(self.num_filters, self.num_filters, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(self.num_filters, self.channels, kernel_size =1, stride=1, padding=0) if non_linearity == 'relu': self.non_linearity = nn.ReLU(inplace=True) elif non_linearity == 'softplus': self.non_linearity = nn.Softplus() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
anway/augmented-neural-odes
ConvFunc
false
14,889
[ "MIT" ]
449
561cfa540ef292d117ba9cf083758281774f3f22
https://github.com/anway/augmented-neural-odes/tree/561cfa540ef292d117ba9cf083758281774f3f22
ContinousRotReprDecoder
import torch import torch.nn as nn import torch.nn.functional as F class ContinousRotReprDecoder(nn.Module): def __init__(self): super(ContinousRotReprDecoder, self).__init__() def forward(self, module_input): reshaped_input = module_input.view(-1, 3, 2) b1 = F.normalize(reshaped_input[:, :, 0], dim=1) dot_prod = torch.sum(b1 * reshaped_input[:, :, 1], dim=1, keepdim=True) b2 = F.normalize(reshaped_input[:, :, 1] - dot_prod * b1, dim=-1) b3 = torch.cross(b1, b2, dim=1) return torch.stack([b1, b2, b3], dim=-1) def get_inputs(): return [torch.rand([4, 3, 2])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp8 = libdevice.sqrt(tmp7) tmp9 = 1e-12 tmp10 = triton_helpers.maximum(tmp8, tmp9) tmp11 = tmp0 / tmp10 tmp13 = tmp11 * tmp12 tmp14 = tmp2 / tmp10 tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tmp5 / tmp10 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tl.store(out_ptr0 + x0, tmp21, xmask) @triton.jit def triton_poi_fused_div_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_ptr0 + (1 + 2 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + 2 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 6 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (4 + 6 * x1), xmask, eviction_policy='evict_last') tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp2 / tmp13 tmp15 = tmp1 * tmp14 tmp16 = tmp0 - tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_div_linalg_cross_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * ((1 + x0) % 3) + 6 * x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 6 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (4 + 6 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (3 * x1 + (2 + x0) % 3), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 3 * x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 3 * x1), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr0 + (2 * ((2 + x0) % 3) + 6 * x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 * x1 + (1 + x0) % 3), xmask) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = 1e-12 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tmp0 / tmp11 tmp15 = tmp14 * tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = libdevice.sqrt(tmp21) tmp23 = triton_helpers.maximum(tmp22, tmp10) tmp24 = tmp13 / tmp23 tmp25 = tmp12 * tmp24 tmp27 = tmp26 / tmp11 tmp29 = tmp28 / tmp23 tmp30 = tmp27 * tmp29 tl.store(out_ptr0 + x2, tmp25, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 36 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x3 = xindex // 3 x2 = xindex // 9 x5 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 2 * x3, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + 6 * x2, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp6 * tmp6 tmp8 = tl.load(in_ptr0 + (2 + 6 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr0 + (4 + 6 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = 1e-12 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = tmp5 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp4, tmp17, tmp18) tmp20 = tmp0 >= tmp3 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr1 + x3, tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp25 = tl.load(in_ptr1 + 3 * x2, tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp25 * tmp25 tmp27 = tl.load(in_ptr1 + (1 + 3 * x2), tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp28 = tmp27 * tmp27 tmp29 = tmp26 + tmp28 tmp30 = tl.load(in_ptr1 + (2 + 3 * x2), tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tmp30 * tmp30 tmp32 = tmp29 + tmp31 tmp33 = libdevice.sqrt(tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp15) tmp35 = tmp24 / tmp34 tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp23, tmp35, tmp36) tmp38 = tmp0 >= tmp21 tl.full([1], 3, tl.int64) tmp41 = tl.load(in_ptr2 + x3, tmp38 & xmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr3 + x3, tmp38 & xmask, eviction_policy= 'evict_last', other=0.0) tmp43 = tmp41 - tmp42 tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp38, tmp43, tmp44) tmp46 = tl.where(tmp23, tmp37, tmp45) tmp47 = tl.where(tmp4, tmp19, tmp46) tl.store(out_ptr0 + x5, tmp47, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 3, 2), (6, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_sum_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 3), (3, 1), torch.float32) triton_poi_fused_div_mul_sub_sum_1[grid(12)](arg0_1, buf0, buf1, 12, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 3), (3, 1), torch.float32) buf3 = empty_strided_cuda((4, 3), (3, 1), torch.float32) triton_poi_fused_div_linalg_cross_2[grid(12)](arg0_1, buf1, buf2, buf3, 12, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) triton_poi_fused_stack_3[grid(36)](arg0_1, buf1, buf2, buf3, buf4, 36, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf1 del buf2 del buf3 return buf4, class ContinousRotReprDecoderNew(nn.Module): def __init__(self): super(ContinousRotReprDecoderNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
antic11d/human_body_prior
ContinousRotReprDecoder
false
14890
[ "Xnet", "X11" ]
412
ba4eaf9ee69a83a874805b764e0f984ba057ffc6
https://github.com/antic11d/human_body_prior/tree/ba4eaf9ee69a83a874805b764e0f984ba057ffc6
TorchEntityRecognizer
import torch from typing import List from collections import OrderedDict from torch import nn def is_dropout_module(module: 'nn.Module', dropout_modules: 'List[nn.Module]'=[nn.Dropout, nn.Dropout2d, nn.Dropout3d]) ->bool: """Detect if a PyTorch Module is a Dropout layer module (nn.Module): Module to check dropout_modules (List[nn.Module], optional): List of Modules that count as Dropout layers. RETURNS (bool): True if module is a Dropout layer. """ for m in dropout_modules: if isinstance(module, m): return True return False class TorchEntityRecognizer(nn.Module): """Torch Entity Recognizer Model Head""" def __init__(self, nI: 'int', nH: 'int', nO: 'int', dropout: 'float'): """Initialize TorchEntityRecognizer. nI (int): Input Dimension nH (int): Hidden Dimension Width nO (int): Output Dimension Width dropout (float): Dropout ratio (0 - 1.0) """ super(TorchEntityRecognizer, self).__init__() nI = nI or 1 nO = nO or 1 self.nH = nH self.model = nn.Sequential(OrderedDict({'input_layer': nn.Linear(nI, nH), 'input_activation': nn.ReLU(), 'input_dropout': nn. Dropout2d(dropout), 'output_layer': nn.Linear(nH, nO), 'output_dropout': nn.Dropout2d(dropout), 'softmax': nn.Softmax( dim=1)})) def forward(self, inputs: 'torch.Tensor') ->torch.Tensor: """Forward pass of the model. inputs (torch.Tensor): Batch of outputs from spaCy tok2vec layer RETURNS (torch.Tensor): Batch of results with a score for each tag for each token """ return self.model(inputs) def _set_layer_shape(self, name: 'str', nI: 'int', nO: 'int'): """Dynamically set the shape of a layer name (str): Layer name nI (int): New input shape nO (int): New output shape """ with torch.no_grad(): layer = getattr(self.model, name) layer.out_features = nO layer.weight = nn.Parameter(torch.Tensor(nO, nI)) if layer.bias is not None: layer.bias = nn.Parameter(torch.Tensor(nO)) layer.reset_parameters() def set_input_shape(self, nI: 'int'): """Dynamically set the shape of the input layer nI (int): New input layer shape """ self._set_layer_shape('input_layer', nI, self.nH) def set_output_shape(self, nO: 'int'): """Dynamically set the shape of the output layer nO (int): New output layer shape """ self._set_layer_shape('output_layer', self.nH, nO) def set_dropout_rate(self, dropout: 'float'): """Set the dropout rate of all Dropout layers in the model. dropout (float): Dropout rate to set """ dropout_layers = [module for module in self.modules() if is_dropout_module(module)] for layer in dropout_layers: layer.p = dropout def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nI': 4, 'nH': 4, 'nO': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from typing import List from collections import OrderedDict from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5 def is_dropout_module(module: 'nn.Module', dropout_modules: 'List[nn.Module]'=[nn.Dropout, nn.Dropout2d, nn.Dropout3d]) ->bool: """Detect if a PyTorch Module is a Dropout layer module (nn.Module): Module to check dropout_modules (List[nn.Module], optional): List of Modules that count as Dropout layers. RETURNS (bool): True if module is a Dropout layer. """ for m in dropout_modules: if isinstance(module, m): return True return False class TorchEntityRecognizerNew(nn.Module): """Torch Entity Recognizer Model Head""" def __init__(self, nI: 'int', nH: 'int', nO: 'int', dropout: 'float'): """Initialize TorchEntityRecognizer. nI (int): Input Dimension nH (int): Hidden Dimension Width nO (int): Output Dimension Width dropout (float): Dropout ratio (0 - 1.0) """ super(TorchEntityRecognizerNew, self).__init__() nI = nI or 1 nO = nO or 1 self.nH = nH self.model = nn.Sequential(OrderedDict({'input_layer': nn.Linear(nI, nH), 'input_activation': nn.ReLU(), 'input_dropout': nn. Dropout2d(dropout), 'output_layer': nn.Linear(nH, nO), 'output_dropout': nn.Dropout2d(dropout), 'softmax': nn.Softmax( dim=1)})) def _set_layer_shape(self, name: 'str', nI: 'int', nO: 'int'): """Dynamically set the shape of a layer name (str): Layer name nI (int): New input shape nO (int): New output shape """ with torch.no_grad(): layer = getattr(self.model, name) layer.out_features = nO layer.weight = nn.Parameter(torch.Tensor(nO, nI)) if layer.bias is not None: layer.bias = nn.Parameter(torch.Tensor(nO)) layer.reset_parameters() def set_input_shape(self, nI: 'int'): """Dynamically set the shape of the input layer nI (int): New input layer shape """ self._set_layer_shape('input_layer', nI, self.nH) def set_output_shape(self, nO: 'int'): """Dynamically set the shape of the output layer nO (int): New output layer shape """ self._set_layer_shape('output_layer', self.nH, nO) def set_dropout_rate(self, dropout: 'float'): """Set the dropout rate of all Dropout layers in the model. dropout (float): Dropout rate to set """ dropout_layers = [module for module in self.modules() if is_dropout_module(module)] for layer in dropout_layers: layer.p = dropout def forward(self, input_0): primals_1 = self.model.input_layer.weight primals_2 = self.model.input_layer.bias primals_4 = self.model.output_layer.weight primals_5 = self.model.output_layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
apjanco/projects
TorchEntityRecognizer
false
14891
[ "MIT" ]
823
2f8850140ba13ab18b9cf622e46e79013d41701f
https://github.com/apjanco/projects/tree/2f8850140ba13ab18b9cf622e46e79013d41701f
Cnv2d_separable
import time import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data from time import time as time class Cnv2d_separable(nn.Module): def __init__(self, n_input_ch, n_output_ch, kernel_size, stride, padding, bias=False, red_portion=0.5): super(Cnv2d_separable, self).__init__() self.n_input_ch = n_input_ch self.n_input_ch_red = int(n_input_ch * red_portion) self.n_output_ch = n_output_ch self.n_output_ch_red = int(n_output_ch * red_portion) self.n_output_ch_green = n_output_ch - self.n_output_ch_red self.conv_half = nn.Conv2d(self.n_input_ch_red, self. n_output_ch_red, kernel_size, stride, padding, bias=bias) self.conv_all = nn.Conv2d(self.n_input_ch, self.n_output_ch_green, kernel_size, stride, padding, bias=bias) def forward(self, input): first_half = input[:, :self.n_input_ch_red, :, :] first_half_conv = self.conv_half(first_half) full_conv = self.conv_all(input) all_conv = torch.cat((first_half_conv, full_conv), 1) return all_conv def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_input_ch': 4, 'n_output_ch': 4, 'kernel_size': 4, 'stride': 1, 'padding': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import time import torch.nn as nn import torch.nn.parallel import torch.utils.data from time import time as time assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 81 % 4 x0 = xindex % 81 x2 = xindex // 324 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 81 * x1 + 162 * x2), tmp4 & xmask, other=0.0 ) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 81 * (-2 + x1) + 162 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_3, (2, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 2, 4, 4), (64, 16, 4, 1), 0), primals_2, stride=(1, 1), padding =(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 9, 9), (162, 81, 9, 1)) buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 2, 9, 9), (162, 81, 9, 1)) buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1296)](buf0, buf1, buf2, 1296, XBLOCK= 128, num_warps=4, num_stages=1) del buf0 del buf1 return buf2, primals_1, primals_2, primals_3 class Cnv2d_separableNew(nn.Module): def __init__(self, n_input_ch, n_output_ch, kernel_size, stride, padding, bias=False, red_portion=0.5): super(Cnv2d_separableNew, self).__init__() self.n_input_ch = n_input_ch self.n_input_ch_red = int(n_input_ch * red_portion) self.n_output_ch = n_output_ch self.n_output_ch_red = int(n_output_ch * red_portion) self.n_output_ch_green = n_output_ch - self.n_output_ch_red self.conv_half = nn.Conv2d(self.n_input_ch_red, self. n_output_ch_red, kernel_size, stride, padding, bias=bias) self.conv_all = nn.Conv2d(self.n_input_ch, self.n_output_ch_green, kernel_size, stride, padding, bias=bias) def forward(self, input_0): primals_2 = self.conv_half.weight primals_3 = self.conv_all.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
aosokin/biogans
Cnv2d_separable
false
14892
[ "Apache-2.0" ]
105
cb72bb0457be335fad6c27a16bb1761b937a6d06
https://github.com/aosokin/biogans/tree/cb72bb0457be335fad6c27a16bb1761b937a6d06
HuberLoss
import torch import torch.nn as nn class HuberLoss(nn.Module): def __init__(self, delta=1): super().__init__() self.delta = delta def forward(self, sr, hr): l1 = torch.abs(sr - hr) mask = l1 < self.delta sq_loss = 0.5 * l1 ** 2 abs_loss = self.delta * (l1 - 0.5 * self.delta) return torch.mean(mask * sq_loss + ~mask * abs_loss) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_bitwise_not_lt_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp5.to(tl.float32) tmp7 = tmp3 * tmp3 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 * tmp9 tmp11 = tmp5 == 0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp3 - tmp8 tmp14 = tmp13 * tmp4 tmp15 = tmp12 * tmp14 tmp16 = tmp10 + tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = 256.0 tmp21 = tmp19 / tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_bitwise_not_lt_mean_mul_pow_sub_0[grid(1)]( buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class HuberLossNew(nn.Module): def __init__(self, delta=1): super().__init__() self.delta = delta def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
around-star/FLAVR
HuberLoss
false
14893
[ "Apache-2.0" ]
223
3b0b703fd1c67eb053511a3532f539ff468866a8
https://github.com/around-star/FLAVR/tree/3b0b703fd1c67eb053511a3532f539ff468866a8
MAPELoss
import torch import torch.nn as nn class MAPELoss(nn.Module): def forward(self, input, target): return (torch.abs(input - target) / (torch.abs(target) + 0.01)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl_math.abs(tmp1) tmp5 = 0.01 tmp6 = tmp4 + tmp5 tmp7 = tmp3 / tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MAPELossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
arpan-dhatt/oidn
MAPELoss
false
14894
[ "Apache-2.0" ]
1206
9419411ba4b343b475b53587cadd44c83d68dc2a
https://github.com/arpan-dhatt/oidn/tree/9419411ba4b343b475b53587cadd44c83d68dc2a
GeodesicLoss
import torch import torch.nn as nn class GeodesicLoss(nn.Module): def __init__(self, eps=1e-07): super().__init__() self.eps = eps def forward(self, m1, m2): m = torch.bmm(m1, m2.transpose(1, 2)) cos = (m[:, 0, 0] + m[:, 1, 1] + m[:, 2, 2] - 1) / 2 theta = torch.acos(torch.clamp(cos, -1 + self.eps, 1 - self.eps)) return torch.mean(theta) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_acos_add_clamp_div_mean_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = -0.9999999 tmp10 = triton_helpers.maximum(tmp8, tmp9) tmp11 = 0.9999999 tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = libdevice.acos(tmp12) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = 4.0 tmp18 = tmp16 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_acos_add_clamp_div_mean_sub_0[grid(1)](buf2, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf2, class GeodesicLossNew(nn.Module): def __init__(self, eps=1e-07): super().__init__() self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
arsalan0004/6DRepNet
GeodesicLoss
false
14895
[ "MIT" ]
84
cdfb2b151785eb89fef70907a6f2a19fa0acf4ae
https://github.com/arsalan0004/6DRepNet/tree/cdfb2b151785eb89fef70907a6f2a19fa0acf4ae
GradientLoss
import torch import torch.nn as nn def tensor_gradient(input): input0 = input[..., :-1, :-1] didy = input[..., 1:, :-1] - input0 didx = input[..., :-1, 1:] - input0 return torch.cat((didy, didx), -3) class GradientLoss(nn.Module): def forward(self, input, target): return torch.abs(tensor_gradient(input) - tensor_gradient(target) ).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_cat_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 288 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r2 = rindex // 9 % 8 r0 = rindex % 3 r1 = rindex // 3 % 3 r3 = rindex // 72 tmp0 = r2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + tl.broadcast_to(4 + r0 + 4 * r1 + 16 * r2 + 64 * r3, [RBLOCK]), rmask & tmp4, other=0.0) tmp6 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 16 * r2 + 64 * r3, [RBLOCK]), rmask & tmp4, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr0 + tl.broadcast_to(1 + r0 + 4 * r1 + 16 * (-4 + r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0) tmp14 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 16 * (-4 + r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0) tmp15 = tmp13 - tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tmp19 = tl.load(in_ptr1 + tl.broadcast_to(4 + r0 + 4 * r1 + 16 * r2 + 64 * r3, [RBLOCK]), rmask & tmp4, other=0.0) tmp20 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 16 * r2 + 64 * r3, [RBLOCK]), rmask & tmp4, other=0.0) tmp21 = tmp19 - tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tl.load(in_ptr1 + tl.broadcast_to(1 + r0 + 4 * r1 + 16 * (-4 + r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0) tmp25 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 16 * (-4 + r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0) tmp26 = tmp24 - tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp10, tmp26, tmp27) tmp29 = tl.where(tmp4, tmp23, tmp28) tmp30 = tmp18 - tmp29 tmp31 = tl_math.abs(tmp30) tmp32 = tl.broadcast_to(tmp31, [RBLOCK]) tmp34 = tl.where(rmask, tmp32, 0) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0)) tmp36 = 288.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_cat_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 288, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf1, def tensor_gradient(input): input0 = input[..., :-1, :-1] didy = input[..., 1:, :-1] - input0 didx = input[..., :-1, 1:] - input0 return torch.cat((didy, didx), -3) class GradientLossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
arpan-dhatt/oidn
GradientLoss
false
14896
[ "Apache-2.0" ]
1206
9419411ba4b343b475b53587cadd44c83d68dc2a
https://github.com/arpan-dhatt/oidn/tree/9419411ba4b343b475b53587cadd44c83d68dc2a
SMAPELoss
import torch import torch.nn as nn class SMAPELoss(nn.Module): def forward(self, input, target): return (torch.abs(input - target) / (torch.abs(input) + torch.abs( target) + 0.01)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl_math.abs(tmp0) tmp5 = tl_math.abs(tmp1) tmp6 = tmp4 + tmp5 tmp7 = 0.01 tmp8 = tmp6 + tmp7 tmp9 = tmp3 / tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp12 / tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SMAPELossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
arpan-dhatt/oidn
SMAPELoss
false
14897
[ "Apache-2.0" ]
1206
9419411ba4b343b475b53587cadd44c83d68dc2a
https://github.com/arpan-dhatt/oidn/tree/9419411ba4b343b475b53587cadd44c83d68dc2a
PairwiseRankerModel
import torch import torch.onnx import torch.nn as nn class PairwiseRankerModel(nn.Module): def __init__(self, embedding_size): super(PairwiseRankerModel, self).__init__() self.query_doc_transform = torch.nn.Linear(in_features= embedding_size * 2, out_features=embedding_size) self.compare_transform = torch.nn.Linear(in_features=embedding_size * 2, out_features=1) def forward(self, query_embedding, doc_1_embedding, doc_2_embedding): query_doc_1_rep = torch.cat((query_embedding, doc_1_embedding), 1) query_doc_1_rep = torch.sigmoid(self.query_doc_transform( query_doc_1_rep)) query_doc_2_rep = torch.cat((query_embedding, doc_2_embedding), 1) query_doc_2_rep = torch.sigmoid(self.query_doc_transform( query_doc_2_rep)) compare = torch.cat((query_doc_1_rep, query_doc_2_rep), 1) compare = self.compare_transform(compare) return torch.sigmoid(compare) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embedding_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.onnx import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp10, xmask) tl.store(out_ptr1 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp12 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.sigmoid(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 8), (8, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, primals_5, buf0, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_5 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_3 del primals_4 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](buf1, buf3, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (8, 1), (1, 8 ), 0), out=buf5) buf6 = buf5 del buf5 triton_poi_fused_sigmoid_2[grid(4)](buf6, primals_7, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_7 return buf6, buf0, buf1, buf2, buf3, buf4, buf6, primals_6 class PairwiseRankerModelNew(nn.Module): def __init__(self, embedding_size): super(PairwiseRankerModelNew, self).__init__() self.query_doc_transform = torch.nn.Linear(in_features= embedding_size * 2, out_features=embedding_size) self.compare_transform = torch.nn.Linear(in_features=embedding_size * 2, out_features=1) def forward(self, input_0, input_1, input_2): primals_3 = self.query_doc_transform.weight primals_4 = self.query_doc_transform.bias primals_6 = self.compare_transform.weight primals_7 = self.compare_transform.bias primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
appotry/sample-apps
PairwiseRankerModel
false
14898
[ "Apache-2.0" ]
167
6b107ffc67fc917d66fabdeff893b5b7cb157c61
https://github.com/appotry/sample-apps/tree/6b107ffc67fc917d66fabdeff893b5b7cb157c61
NetDropout
import torch from torch import nn from torch.nn import functional as F class NetDropout(nn.Module): def __init__(self, nclasses, img, nchans1=10, dropout_prob=0.4): super().__init__() nchannels, _nrows, _ncols = img.shape self.conv1 = nn.Conv2d(nchannels, nchans1, kernel_size=3, padding=1) self.conv1_dropout = nn.Dropout2d(dropout_prob) def forward(self, x): out = F.max_pool2d(torch.tanh(self.conv1(x)), 2) out = self.conv1_dropout(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nclasses': 4, 'img': torch.rand([4, 4, 4])}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (10, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 4, 4), (160, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_tanh_0[grid(640)](buf1, primals_2, 640, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 10, 2, 2), (40, 4, 2, 1), torch.float32) buf3 = empty_strided_cuda((4, 10, 2, 2), (40, 4, 2, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(160)](buf1, buf2, buf3, 160, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class NetDropoutNew(nn.Module): def __init__(self, nclasses, img, nchans1=10, dropout_prob=0.4): super().__init__() nchannels, _nrows, _ncols = img.shape self.conv1 = nn.Conv2d(nchannels, nchans1, kernel_size=3, padding=1) self.conv1_dropout = nn.Dropout2d(dropout_prob) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
arpitvaghela/probml-notebooks
NetDropout
false
14899
[ "MIT" ]
166
32ecb309dd474b989fd1c6ce4ad6dab7a25bbead
https://github.com/arpitvaghela/probml-notebooks/tree/32ecb309dd474b989fd1c6ce4ad6dab7a25bbead
ComplexActLayer
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class ComplexActLayer(nn.Module): """ Activation differently 'real' part and 'img' part In implemented DCUnet on this repository, Real part is activated to log space. And Phase(img) part, it is distributed in [-pi, pi]... """ def forward(self, x): real, img = x.chunk(2, 1) return torch.cat([F.leaky_relu_(real), torch.tanh(img) * np.pi], dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = xindex // 32 tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr1 + (x0 + 64 * x1), tmp5, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr0 + (32 + x0 + 16 * (-2 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = 3.141592653589793 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp6, tmp12, tmp13) tmp15 = tl.where(tmp4, tmp5, tmp14) tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(128)](arg0_1, arg0_1, 128, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(256)](arg0_1, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf4, class ComplexActLayerNew(nn.Module): """ Activation differently 'real' part and 'img' part In implemented DCUnet on this repository, Real part is activated to log space. And Phase(img) part, it is distributed in [-pi, pi]... """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ashishpatel26/source_separation
ComplexActLayer
false
14900
[ "Apache-2.0" ]
269
6f755889654d7207fc89ba03a2f49d9ba92df8ea
https://github.com/ashishpatel26/source_separation/tree/6f755889654d7207fc89ba03a2f49d9ba92df8ea
CNN
import torch class CNN(torch.nn.Module): def __init__(self, n_classes): super(CNN, self).__init__() self.conv = torch.nn.Sequential() self.conv.add_module('conv_1', torch.nn.Conv2d(1, 4, kernel_size=2)) self.conv.add_module('dropout_1', torch.nn.Dropout()) self.conv.add_module('maxpool_1', torch.nn.MaxPool2d(kernel_size=2)) self.conv.add_module('relu_1', torch.nn.ReLU()) self.conv.add_module('conv_2', torch.nn.Conv2d(4, 8, kernel_size=2)) self.conv.add_module('dropout_2', torch.nn.Dropout()) self.conv.add_module('maxpool_2', torch.nn.MaxPool2d(kernel_size=2)) self.conv.add_module('relu_2', torch.nn.ReLU()) self.fc = torch.nn.Sequential() self.fc.add_module('fc1', torch.nn.Linear(8, 32)) self.fc.add_module('relu_3', torch.nn.ReLU()) self.fc.add_module('dropout_3', torch.nn.Dropout()) self.fc.add_module('fc2', torch.nn.Linear(32, n_classes)) def forward(self, x): x = self.conv.forward(x) x = x.view(-1, 8) return self.fc.forward(x) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {'n_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 63504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3969 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 15376 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x1 = xindex // 31 % 31 x4 = xindex // 961 x3 = xindex // 3844 x5 = xindex % 3844 x6 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 126 * x1 + 3969 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 126 * x1 + 3969 * x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (63 + 2 * x0 + 126 * x1 + 3969 * x4), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (64 + 2 * x0 + 126 * x1 + 3969 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x5 + 3968 * x3), tmp15, xmask) tl.store(out_ptr1 + x6, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 900 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 7200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 15 x4 = xindex // 15 x2 = xindex // 1800 x3 = xindex % 1800 tmp0 = tl.load(in_ptr0 + (2 * x0 + 60 * x4), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 60 * x4), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (30 + 2 * x0 + 60 * x4), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (31 + 2 * x0 + 60 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 
2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + (x3 + 1920 * x2), tmp15, xmask) tl.store(out_ptr1 + (x3 + 1824 * x2), tmp18, xmask) tl.store(out_ptr2 + (x3 + 1920 * x2), tmp20, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 7200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1824 * (x0 // 1800) + x0 % 1800), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (8, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (32, 8), (8, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (4, 32), (32, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 63, 63), (15876, 3969, 63, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(63504)](buf1, primals_2, 63504, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 31, 31), (3968, 961, 31, 1), torch .int8) buf3 = empty_strided_cuda((4, 4, 31, 31), (3844, 961, 31, 1), torch .float32) triton_poi_fused_max_pool2d_with_indices_relu_1[grid(15376)](buf1, buf2, buf3, 15376, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 30, 30), (7200, 900, 30, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(28800)](buf5, primals_5, 28800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 8, 15, 15), (1920, 225, 15, 1), torch .int8) buf7 = empty_strided_cuda((4, 8, 15, 15), (1824, 225, 15, 1), torch .float32) buf12 = empty_strided_cuda((4, 8, 15, 15), (1920, 225, 15, 1), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid (7200)](buf5, buf6, buf7, buf12, 7200, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((900, 8), (8, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_view_4[grid(7200)](buf7, buf8, 7200, XBLOCK=256, num_warps=4, 
num_stages=1) del buf7 buf9 = empty_strided_cuda((900, 32), (32, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (8, 32), (1, 8), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_relu_5[grid(28800)](buf10, primals_7, 28800, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf11 = empty_strided_cuda((900, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf11) del primals_9 return (buf11, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, buf8, buf10, primals_8, primals_6, buf12) class CNNNew(torch.nn.Module): def __init__(self, n_classes): super(CNNNew, self).__init__() self.conv = torch.nn.Sequential() self.conv.add_module('conv_1', torch.nn.Conv2d(1, 4, kernel_size=2)) self.conv.add_module('dropout_1', torch.nn.Dropout()) self.conv.add_module('maxpool_1', torch.nn.MaxPool2d(kernel_size=2)) self.conv.add_module('relu_1', torch.nn.ReLU()) self.conv.add_module('conv_2', torch.nn.Conv2d(4, 8, kernel_size=2)) self.conv.add_module('dropout_2', torch.nn.Dropout()) self.conv.add_module('maxpool_2', torch.nn.MaxPool2d(kernel_size=2)) self.conv.add_module('relu_2', torch.nn.ReLU()) self.fc = torch.nn.Sequential() self.fc.add_module('fc1', torch.nn.Linear(8, 32)) self.fc.add_module('relu_3', torch.nn.ReLU()) self.fc.add_module('dropout_3', torch.nn.Dropout()) self.fc.add_module('fc2', torch.nn.Linear(32, n_classes)) def forward(self, input_0): primals_1 = self.conv.conv_1.weight primals_2 = self.conv.conv_1.bias primals_4 = self.conv.conv_2.weight primals_5 = self.conv.conv_2.bias primals_6 = self.fc.fc1.weight primals_7 = self.fc.fc1.bias primals_8 = self.fc.fc2.weight primals_9 = self.fc.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
anukaal/opytimizer
CNN
false
14901
[ "Apache-2.0" ]
528
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
https://github.com/anukaal/opytimizer/tree/5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
distLinear
import torch import torch.nn as nn import torch.optim import torch.utils.data.sampler from torch.nn.utils.weight_norm import WeightNorm class distLinear(nn.Module): def __init__(self, indim, outdim): super(distLinear, self).__init__() self.L = nn.Linear(indim, outdim, bias=False) self.class_wise_learnable_norm = True if self.class_wise_learnable_norm: WeightNorm.apply(self.L, 'weight', dim=0) if outdim <= 200: self.scale_factor = 2 else: self.scale_factor = 10 def forward(self, x): x_norm = torch.norm(x, p=2, dim=1).unsqueeze(1).expand_as(x) x_normalized = x.div(x_norm + 1e-05) if not self.class_wise_learnable_norm: L_norm = torch.norm(self.L.weight.data, p=2, dim=1).unsqueeze(1 ).expand_as(self.L.weight.data) self.L.weight.data = self.L.weight.data.div(L_norm + 1e-05) cos_dist = self.L(x_normalized) scores = self.scale_factor * cos_dist return scores def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'indim': 4, 'outdim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.optim import torch.utils.data.sampler from torch.nn.utils.weight_norm import WeightNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) get_raw_stream(0) triton_poi_fused__weight_norm_interface_0[grid(4)](primals_3, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = 
empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__weight_norm_interface_1[grid(16)](primals_3, primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_2[grid(256)](primals_1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_mul_3[grid(256)](buf4, 256, XBLOCK=256, num_warps= 4, num_stages=1) return buf4, buf1, primals_2, primals_3, buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0) class distLinearNew(nn.Module): def __init__(self, indim, outdim): super(distLinearNew, self).__init__() self.L = nn.Linear(indim, outdim, bias=False) self.class_wise_learnable_norm = True if self.class_wise_learnable_norm: WeightNorm.apply(self.L, 'weight', dim=0) if outdim <= 200: self.scale_factor = 2 else: self.scale_factor = 10 def forward(self, input_0): primals_2 = self.L.weight_g primals_3 = self.L.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
artificially-ai/FewShotVision
distLinear
false
14,902
[ "MIT" ]
90
02c1132828bc9caba4cadd0b2f731bd63f66b826
https://github.com/artificially-ai/FewShotVision/tree/02c1132828bc9caba4cadd0b2f731bd63f66b826
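A minimal sketch of what the distLinear record above computes, using only stock PyTorch: up to the learnable per-row magnitude that WeightNorm introduces, the scores are a scaled cosine similarity between the normalized input and the weight rows, with scale_factor = 2 because outdim <= 200 here. The toy shapes and names below are illustrative, not part of the record.

import torch
import torch.nn.functional as F

x = torch.rand(8, 16)                        # batch of features
w = torch.randn(10, 16)                      # one weight row per class
x_unit = x / (x.norm(p=2, dim=1, keepdim=True) + 1e-05)
w_unit = F.normalize(w, p=2, dim=1)          # unit-norm rows, the direction WeightNorm learns
scores = 2 * (x_unit @ w_unit.t())           # scale_factor * cosine similarity, shape (8, 10)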
UnpoolingAsConvolution
import torch import torch.nn as nn def get_incoming_shape(incoming): size = incoming.size() return [size[0], size[1], size[2], size[3]] def interleave(tensors, axis): old_shape = get_incoming_shape(tensors[0])[1:] new_shape = [-1] + old_shape new_shape[axis] *= len(tensors) stacked = torch.stack(tensors, axis + 1) reshaped = stacked.view(new_shape) return reshaped class UnpoolingAsConvolution(nn.Module): def __init__(self, inplanes, planes): super(UnpoolingAsConvolution, self).__init__() self.conv_A = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(3, 3), stride=1, padding=1) self.conv_B = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(2, 3), stride=1, padding=0) self.conv_C = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(3, 2), stride=1, padding=0) self.conv_D = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(2, 2), stride=1, padding=0) def forward(self, x): output_a = self.conv_A(x) padded_b = nn.functional.pad(x, (1, 1, 0, 1)) output_b = self.conv_B(padded_b) padded_c = nn.functional.pad(x, (0, 1, 1, 1)) output_c = self.conv_C(padded_c) padded_d = nn.functional.pad(x, (0, 1, 0, 1)) output_d = self.conv_D(padded_d) left = interleave([output_a, output_b], axis=2) right = interleave([output_c, output_d], axis=2) y = interleave([left, right], axis=3) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 5 x0 = xindex % 6 x2 = xindex // 30 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = -1 + x0 tmp4 = tl.full([1], 0, tl.int64) tmp5 = tmp3 >= tmp4 tmp6 = tmp3 < tmp1 tmp7 = tmp2 & tmp5 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 6 x0 = xindex % 5 x2 = xindex // 30 x3 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x0 tmp6 = tmp5 < tmp3 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 5 x0 = xindex % 5 x2 = xindex // 25 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 4 x2 = xindex // 8 % 8 x5 = xindex // 64 x3 = xindex // 64 % 4 x6 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x1 + 4 * (x2 % 2) tmp7 = tl.full([1], 4, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = tmp8 & tmp4 tmp10 = tl.load(in_ptr0 + (4 * (x2 // 2) + 16 * x5 + (x1 + 4 * (x2 % 2) )), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr1 + x3, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 + tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp5 >= tmp7 tl.full([1], 8, tl.int64) tmp18 = tmp15 & tmp4 tmp19 = tl.load(in_ptr2 + (4 * (x2 // 2) + 16 * x5 + (-4 + x1 + 4 * (x2 % 2))), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr3 + x3, tmp18 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tmp19 + tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp18, tmp21, tmp22) tmp24 = tl.where(tmp8, tmp14, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp4, tmp24, tmp25) tmp27 = 
tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp30 = tmp8 & tmp27 tmp31 = tl.load(in_ptr4 + (4 * (x2 // 2) + 16 * x5 + (x1 + 4 * (x2 % 2) )), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tl.load(in_ptr5 + x3, tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp33 = tmp31 + tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tmp15 & tmp27 tmp37 = tl.load(in_ptr6 + (4 * (x2 // 2) + 16 * x5 + (-4 + x1 + 4 * (x2 % 2))), tmp36 & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + x3, tmp36 & xmask, eviction_policy= 'evict_last', other=0.0) tmp39 = tmp37 + tmp38 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp36, tmp39, tmp40) tmp42 = tl.where(tmp8, tmp35, tmp41) tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp27, tmp42, tmp43) tmp45 = tl.where(tmp4, tmp26, tmp44) tl.store(out_ptr0 + x6, tmp45, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 2), (24, 6, 2, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(480)](primals_3, buf1, 480, XBLOCK=128, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 6, 5), (120, 30, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_1[grid(480)](primals_3, buf3, 480, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_2[grid(400)](primals_3, buf5, 400, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 8, 4, 2), (256, 64, 8, 2, 1), torch.float32) triton_poi_fused_stack_3[grid(1024)](buf0, primals_2, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 1024, XBLOCK =128, num_warps=4, num_stages=1) del buf0 del buf2 del buf4 del buf6 del primals_2 del primals_5 del primals_7 del primals_9 return reinterpret_tensor(buf7, (4, 4, 8, 8), (256, 64, 8, 1), 0 ), primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5 def get_incoming_shape(incoming): size = incoming.size() return 
[size[0], size[1], size[2], size[3]] def interleave(tensors, axis): old_shape = get_incoming_shape(tensors[0])[1:] new_shape = [-1] + old_shape new_shape[axis] *= len(tensors) stacked = torch.stack(tensors, axis + 1) reshaped = stacked.view(new_shape) return reshaped class UnpoolingAsConvolutionNew(nn.Module): def __init__(self, inplanes, planes): super(UnpoolingAsConvolutionNew, self).__init__() self.conv_A = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(3, 3), stride=1, padding=1) self.conv_B = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(2, 3), stride=1, padding=0) self.conv_C = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(3, 2), stride=1, padding=0) self.conv_D = nn.Conv2d(in_channels=inplanes, out_channels=planes, kernel_size=(2, 2), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv_A.weight primals_2 = self.conv_A.bias primals_4 = self.conv_B.weight primals_5 = self.conv_B.bias primals_6 = self.conv_C.weight primals_7 = self.conv_C.bias primals_8 = self.conv_D.weight primals_9 = self.conv_D.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
anve96/DE_resnet_unet_hyb
UnpoolingAsConvolution
false
14,903
[ "BSD-3-Clause" ]
45
f0751854c8707cc4f228bb9d52d93635cc3584ae
https://github.com/anve96/DE_resnet_unet_hyb/tree/f0751854c8707cc4f228bb9d52d93635cc3584ae
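The interleave helper in the record above is the whole trick: stacking two tensors at axis + 1 and reshaping alternates their slices, doubling that axis. A small sketch with toy values of mine:

import torch

a = torch.zeros(1, 1, 2, 2)
b = torch.ones(1, 1, 2, 2)
stacked = torch.stack([a, b], dim=3)         # axis=2, so stack at axis + 1
out = stacked.view(-1, 1, 4, 2)              # rows now alternate: a0, b0, a1, b1
print(out[0, 0, :, 0])                       # tensor([0., 1., 0., 1.])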
SEBlock
import torch import torch.nn as nn import torch.nn.functional as F class SEBlock(nn.Module): def __init__(self, input_channels, internal_neurons): super(SEBlock, self).__init__() self.down = nn.Conv2d(in_channels=input_channels, out_channels= internal_neurons, kernel_size=1, stride=1, bias=True) self.up = nn.Conv2d(in_channels=internal_neurons, out_channels= input_channels, kernel_size=1, stride=1, bias=True) self.input_channels = input_channels def forward(self, inputs): x = F.avg_pool2d(inputs, kernel_size=inputs.size(3)) x = self.down(x) x = F.relu(x) x = self.up(x) x = torch.sigmoid(x) x = x.view(-1, self.input_channels, 1, 1) return inputs * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4, 'internal_neurons': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_view_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, 
eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(16)](buf4, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_view_3[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf5, primals_1, primals_2, primals_4, buf0, buf2, buf4 class SEBlockNew(nn.Module): def __init__(self, input_channels, internal_neurons): super(SEBlockNew, self).__init__() self.down = nn.Conv2d(in_channels=input_channels, out_channels= internal_neurons, kernel_size=1, stride=1, bias=True) self.up = nn.Conv2d(in_channels=internal_neurons, out_channels= input_channels, kernel_size=1, stride=1, bias=True) self.input_channels = input_channels def forward(self, input_0): primals_2 = self.down.weight primals_3 = self.down.bias primals_4 = self.up.weight primals_5 = self.up.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
arsalan0004/6DRepNet
SEBlock
false
14,904
[ "MIT" ]
84
cdfb2b151785eb89fef70907a6f2a19fa0acf4ae
https://github.com/arsalan0004/6DRepNet/tree/cdfb2b151785eb89fef70907a6f2a19fa0acf4ae
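One way to sanity-check records like the SEBlock one above is to load the eager module's weights into the fused module and compare outputs. A sketch, assuming both classes from the record are in scope and a CUDA device is available (the fused call path is CUDA-only):

import torch

torch.manual_seed(0)
ref = SEBlock(4, 4).cuda()                   # eager reference from the record
fused = SEBlockNew(4, 4).cuda()              # Triton version from the record
fused.load_state_dict(ref.state_dict())      # same conv weights and biases
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x), fused(x), atol=1e-5)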
CoordConv
import torch import torch.nn as nn class _AddCoords(nn.Module): def __init__(self, use_radius=False): super().__init__() self.use_radius = use_radius self.extra_channels = 3 if self.use_radius else 2 def forward(self, input): batch_size, _, h, w = input.size() def gen(length): return -1 + torch.arange(length, dtype=input.dtype, device= input.device) / (length - 1) * 2 results = [input] with torch.no_grad(): x_coords = gen(w).view(1, 1, 1, w).expand((batch_size, 1, h, w)) y_coords = gen(h).view(1, 1, h, 1).expand((batch_size, 1, h, w)) results.extend([x_coords, y_coords]) if self.use_radius: radius = torch.sqrt(torch.pow(x_coords - 0.5, 2) + torch. pow(y_coords - 0.5, 2)) results.append(radius) return torch.cat(results, dim=1) class CoordConv(nn.Module): def __init__(self, in_channels, out_channels, *args, use_radius=False, **kwargs): super().__init__() self.addcoords = _AddCoords(use_radius=use_radius) in_channels += self.addcoords.extra_channels self.conv = nn.Conv2d(in_channels, out_channels, *args, **kwargs) def forward(self, x): f = self.addcoords(x) f = self.conv(f) return f def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 6 x3 = xindex // 96 x4 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = x0 tmp11 = tmp10.to(tl.float32) tmp12 = 0.3333333333333333 tmp13 = tmp11 * tmp12 tmp14 = 2.0 tmp15 = tmp13 * tmp14 tmp16 = -1.0 tmp17 = tmp15 + tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tl.full([1], 6, tl.int64) tmp23 = x1 tmp24 = tmp23.to(tl.float32) tmp25 = tmp24 * tmp12 tmp26 = tmp25 * tmp14 tmp27 = tmp26 + tmp16 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tl.where(tmp9, tmp19, tmp29) tmp31 = tl.where(tmp4, tmp5, tmp30) tl.store(out_ptr0 + x5, tmp31, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 6, 4, 4), (96, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf2, primals_2, buf0 class _AddCoords(nn.Module): def __init__(self, use_radius=False): super().__init__() self.use_radius = use_radius self.extra_channels = 3 if self.use_radius else 2 def forward(self, input): batch_size, _, h, w = input.size() def gen(length): return -1 + torch.arange(length, dtype=input.dtype, device= input.device) / (length - 1) * 2 results = [input] with torch.no_grad(): x_coords = gen(w).view(1, 1, 1, w).expand((batch_size, 1, h, w)) y_coords = gen(h).view(1, 1, h, 1).expand((batch_size, 1, h, w)) results.extend([x_coords, y_coords]) if self.use_radius: radius = torch.sqrt(torch.pow(x_coords - 0.5, 2) + torch. 
pow(y_coords - 0.5, 2)) results.append(radius) return torch.cat(results, dim=1) class CoordConvNew(nn.Module): def __init__(self, in_channels, out_channels, *args, use_radius=False, **kwargs): super().__init__() self.addcoords = _AddCoords(use_radius=use_radius) in_channels += self.addcoords.extra_channels self.conv = nn.Conv2d(in_channels, out_channels, *args, **kwargs) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ashutosh1919/neuro-symbolic-sudoku-solver
CoordConv
false
14,905
[ "Apache-2.0" ]
52
ecb4274ff66d3b6a86f64584e0a767bf785f107f
https://github.com/ashutosh1919/neuro-symbolic-sudoku-solver/tree/ecb4274ff66d3b6a86f64584e0a767bf785f107f
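The constant 0.3333333333333333 inside triton_poi_fused_cat_0 above is just 1/(w - 1) for w = 4: the fused kernel inlines the coordinate grid that _AddCoords builds (x0 * 1/3 * 2 - 1). The same grid in eager PyTorch:

import torch

w = 4
xs = -1 + torch.arange(w, dtype=torch.float32) / (w - 1) * 2
print(xs)                                    # tensor([-1.0000, -0.3333,  0.3333,  1.0000])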
ProbabilityLinear
import torch import torch.nn as nn import torch.nn.functional as F def normalize_prob(a, dim=-1): """Perform 1-norm along the specific dimension.""" return a / a.sum(dim=dim, keepdim=True) class ProbabilityLinear(nn.Linear): def __init__(self, in_features, out_features, bias=False, norm=True): assert bias is False, 'Bias regularization for SOFTMAX is not implemented.' super().__init__(in_features, out_features, bias) self.norm = norm def forward(self, input): weight = self._regulize_parameter(self.weight) output = F.linear(input, weight, None) if self.norm: output = normalize_prob(output) return output def _regulize_parameter(self, p): return F.softmax(p, dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](primals_1, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) 
triton_poi_fused_div_sum_2[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf2 def normalize_prob(a, dim=-1): """Perform 1-norm along the specific dimension.""" return a / a.sum(dim=dim, keepdim=True) class ProbabilityLinearNew(nn.Linear): def __init__(self, in_features, out_features, bias=False, norm=True): assert bias is False, 'Bias regularization for SOFTMAX is not implemented.' super().__init__(in_features, out_features, bias) self.norm = norm def _regulize_parameter(self, p): return F.softmax(p, dim=0) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ashutosh1919/neuro-symbolic-sudoku-solver
ProbabilityLinear
false
14,906
[ "Apache-2.0" ]
52
ecb4274ff66d3b6a86f64584e0a767bf785f107f
https://github.com/ashutosh1919/neuro-symbolic-sudoku-solver/tree/ecb4274ff66d3b6a86f64584e0a767bf785f107f
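Why the column-wise softmax plus row normalization in the ProbabilityLinear record yields a probability output: softmax over dim=0 makes each weight column sum to 1, so every output row sums to the corresponding input row's sum, and normalize_prob then forces exact row sums of 1. A quick numeric check with toy values of mine:

import torch
import torch.nn.functional as F

w = torch.randn(4, 4)                        # (out_features, in_features)
x = torch.rand(2, 4)
out = F.linear(x, F.softmax(w, dim=0))       # row sums equal x.sum(-1)
out = out / out.sum(-1, keepdim=True)        # normalize_prob
assert torch.allclose(out.sum(-1), torch.ones(2))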
ProbabilityBilinear
import torch import torch.nn as nn import torch.nn.functional as F def normalize_prob(a, dim=-1): """Perform 1-norm along the specific dimension.""" return a / a.sum(dim=dim, keepdim=True) class ProbabilityBilinear(nn.Bilinear): def __init__(self, in1_features, in2_features, out_features, bias=False, norm=True): assert bias is False, 'Bias regularization for SOFTMAX is not implemented.' super().__init__(in1_features, in2_features, out_features, bias) self.norm = norm def forward(self, input1, input2): weight = self._regulize_parameter(self.weight) output = F.bilinear(input1, input2, weight, None) if self.norm: output = normalize_prob(output) return output def _regulize_parameter(self, p): return F.softmax(p, dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in1_features': 4, 'in2_features': 4, 'out_features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](primals_1, buf0, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor( primals_2, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) buf3 = buf2 del buf2 buf4 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_sum_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf4, buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf3 def normalize_prob(a, dim=-1): """Perform 1-norm along the specific dimension.""" return a / a.sum(dim=dim, keepdim=True) class ProbabilityBilinearNew(nn.Bilinear): def __init__(self, in1_features, in2_features, out_features, bias=False, norm=True): assert bias is False, 'Bias regularization for SOFTMAX is not implemented.' super().__init__(in1_features, in2_features, out_features, bias) self.norm = norm def _regulize_parameter(self, p): return F.softmax(p, dim=0) def forward(self, input_0, input_1): primals_1 = self.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
ashutosh1919/neuro-symbolic-sudoku-solver
ProbabilityBilinear
false
14,907
[ "Apache-2.0" ]
52
ecb4274ff66d3b6a86f64584e0a767bf785f107f
https://github.com/ashutosh1919/neuro-symbolic-sudoku-solver/tree/ecb4274ff66d3b6a86f64584e0a767bf785f107f
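The fused path for ProbabilityBilinear routes through torch.ops.aten._trilinear, but the eager semantics are just F.bilinear with a softmaxed weight. A shape-level sketch under the record's feature sizes (batch size is my own choice):

import torch
import torch.nn.functional as F

w = torch.randn(4, 4, 4)                     # (out_features, in1_features, in2_features)
a = torch.rand(8, 4)
b = torch.rand(8, 4)
out = F.bilinear(a, b, F.softmax(w, dim=0))  # (8, 4)
out = out / out.sum(-1, keepdim=True)        # normalize_prob, rows sum to 1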
GeneralSoftmax
import enum import functools import torch import torch.nn as nn import torch.nn.functional as F def _canonize_enum_value(value): if type(value) is str: value = value.lower() return value def masked_softmax(logits, mask=None, dim=-1): eps = 1e-20 probs = F.softmax(logits, dim=dim) if mask is not None: mask = mask.float() probs = probs * mask + eps probs = probs / probs.sum(dim, keepdim=True) return probs def no_grad_func(func): @functools.wraps(func) def new_func(*args, **kwargs): with torch.no_grad(): return func(*args, **kwargs) return new_func @no_grad_func def one_hot(index, nr_classes): """ Convert a list of class labels into one-hot representation. .. note:: This function support only one-dimensional input. For high dimensional inputs, use `one_hot_nd`. Args: index (Tensor): shape `(N, )`, input class labels. nr_classes (int): number of total classes. Returns: Tensor: shape `(N, nr_classes)`, one-hot representation of the class labels. """ assert index.dim() == 1 mask = torch.zeros(index.size(0), nr_classes, dtype=torch.float32, device=index.device) ones = torch.ones(index.size(0), 1, dtype=torch.float32, device=index. device) ret = mask.scatter_(1, index.unsqueeze(1), ones) return ret @no_grad_func def one_hot_nd(index, nr_classes): """ Convert a tensor of class labels into one-hot representation. Args: index (Tensor): input class labels. nr_classes (int): number of total classes. Returns: Tensor: one-hot representation of the class labels, the label dimension is assumed to be the last one. """ index_size = index.size() return one_hot(index.view(-1), nr_classes).view(index_size + (nr_classes,)) def greedy_softmax(logits, dim=-1, mask=None): assert dim == -1, 'Greedy softmax support only dim=-1' if mask is not None: probs = masked_softmax(logits, mask=mask, dim=dim) else: probs = logits one_hot = one_hot_nd(probs.max(dim)[1], logits.size(dim)) return one_hot def _sample_gumbel(shape, eps=1e-10, out=None): """ Sample from Gumbel(0, 1) based on https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb , (MIT license) """ U = out.resize_(shape).uniform_() if out is not None else torch.rand(shape) return -torch.log(eps - torch.log(U + eps)) def _gumbel_softmax_sample(logits, dim=-1, tau=1, eps=1e-10, mask=None): """ Draw a sample from the Gumbel-Softmax distribution based on https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb (MIT license) """ gumbel_noise = _sample_gumbel(logits.size(), eps=eps, out=logits.new()) y = logits + gumbel_noise return masked_softmax(y / tau, mask, dim=dim) def set_index_one_hot_(tensor, dim, index, value): """ `tensor[:, :, index, :, :] = value`. Args: tensor (Tensor): input. dim (int) the dimension. index: (LongTensor): the tensor containing the indices along the `dim` dimension. """ if not isinstance(value, (int, float)): value = value.unsqueeze(dim) tensor.scatter_(dim, index.unsqueeze(dim), value) def gumbel_softmax(logits, dim=-1, tau=1, hard=False, mask=None, eps=1e-10): """ Sample from the Gumbel-Softmax distribution and optionally discretize. Args: logits: [batch_size, n_class] unnormalized log-probs dim: along which dim the softmax is performed tau: non-negative scalar temperature hard: if True, take argmax, but differentiate w.r.t. soft sample y eps: eps Returns: [batch_size, n_class] sample from the Gumbel-Softmax distribution. 
If hard=True, then the returned sample will be one-hot, otherwise it will be a probability distribution that sums to 1 across classes Based on https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb , (MIT license) """ y_soft = _gumbel_softmax_sample(logits, tau=tau, eps=eps) if hard: with torch.no_grad(): _, k = y_soft.max(dim=dim) y_hard = torch.zeros_like(logits) y_hard.requires_grad = False set_index_one_hot_(y_hard, dim, k, 1.0) y = (y_hard - y_soft).detach() + y_soft else: y = y_soft return y def general_softmax(logits, dim=-1, tau=1, impl='standard', mask=None, training=False): impl = SoftmaxImplmentation.from_string(impl) if impl is SoftmaxImplmentation.STANDARD: return masked_softmax(logits / tau, dim=dim) elif impl in (SoftmaxImplmentation.GUMBEL, SoftmaxImplmentation.GUMBEL_HARD ): if not training: return greedy_softmax(logits, dim=dim, mask=mask) if impl is SoftmaxImplmentation.GUMBEL: return gumbel_softmax(logits, dim=dim, tau=tau, hard=False, mask=mask) else: return gumbel_softmax(logits, dim=dim, tau=tau, hard=True, mask =mask) class JacEnum(enum.Enum): """A customized enumeration class, adding helper functions for string-based argument parsing.""" @classmethod def from_string(cls, value): value = _canonize_enum_value(value) return cls(value) @classmethod def type_name(cls): return cls.__name__ @classmethod def choice_names(cls): return list(filter(lambda x: not x.startswith('_'), dir(cls))) @classmethod def choice_objs(cls): return [getattr(cls, name) for name in cls.choice_names()] @classmethod def choice_values(cls): return [getattr(cls, name).value for name in cls.choice_names()] @classmethod def is_valid(cls, value): value = _canonize_enum_value(value) return value in cls.choice_values() @classmethod def assert_valid(cls, value): assert cls.is_valid(value ), 'Invalid {}: "{}". Supported choices: {}.'.format(cls. type_name(), value, ','.join(cls.choice_values())) def __jsonify__(self): return self.value class SoftmaxImplmentation(JacEnum): STANDARD = 'standard' GUMBEL = 'gumbel' GUMBEL_HARD = 'gumbel_hard' class GeneralSoftmax(nn.Module): def __init__(self, dim=-1, tau=1.0, impl='standard'): super().__init__() self.dim = dim self.tau = tau self.impl = SoftmaxImplmentation.from_string(impl) def forward(self, logits, mask=None): return general_softmax(logits, dim=self.dim, tau=self.tau, impl= self.impl, mask=mask, training=self.training) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import enum import functools import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, def _canonize_enum_value(value): if type(value) is str: value = value.lower() return value def masked_softmax(logits, mask=None, dim=-1): eps = 1e-20 probs = F.softmax(logits, dim=dim) if mask is not None: mask = mask.float() probs = probs * mask + eps probs = probs / probs.sum(dim, keepdim=True) return probs def no_grad_func(func): @functools.wraps(func) def new_func(*args, **kwargs): with torch.no_grad(): return func(*args, **kwargs) return new_func @no_grad_func def one_hot(index, nr_classes): """ Convert a list of class labels into one-hot representation. .. note:: This function support only one-dimensional input. For high dimensional inputs, use `one_hot_nd`. Args: index (Tensor): shape `(N, )`, input class labels. nr_classes (int): number of total classes. Returns: Tensor: shape `(N, nr_classes)`, one-hot representation of the class labels. 
""" assert index.dim() == 1 mask = torch.zeros(index.size(0), nr_classes, dtype=torch.float32, device=index.device) ones = torch.ones(index.size(0), 1, dtype=torch.float32, device=index. device) ret = mask.scatter_(1, index.unsqueeze(1), ones) return ret @no_grad_func def one_hot_nd(index, nr_classes): """ Convert a tensor of class labels into one-hot representation. Args: index (Tensor): input class labels. nr_classes (int): number of total classes. Returns: Tensor: one-hot representation of the class labels, the label dimension is assumed to be the last one. """ index_size = index.size() return one_hot(index.view(-1), nr_classes).view(index_size + (nr_classes,)) def greedy_softmax(logits, dim=-1, mask=None): assert dim == -1, 'Greedy softmax support only dim=-1' if mask is not None: probs = masked_softmax(logits, mask=mask, dim=dim) else: probs = logits one_hot = one_hot_nd(probs.max(dim)[1], logits.size(dim)) return one_hot def _sample_gumbel(shape, eps=1e-10, out=None): """ Sample from Gumbel(0, 1) based on https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb , (MIT license) """ U = out.resize_(shape).uniform_() if out is not None else torch.rand(shape) return -torch.log(eps - torch.log(U + eps)) def _gumbel_softmax_sample(logits, dim=-1, tau=1, eps=1e-10, mask=None): """ Draw a sample from the Gumbel-Softmax distribution based on https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb (MIT license) """ gumbel_noise = _sample_gumbel(logits.size(), eps=eps, out=logits.new()) y = logits + gumbel_noise return masked_softmax(y / tau, mask, dim=dim) def set_index_one_hot_(tensor, dim, index, value): """ `tensor[:, :, index, :, :] = value`. Args: tensor (Tensor): input. dim (int) the dimension. index: (LongTensor): the tensor containing the indices along the `dim` dimension. """ if not isinstance(value, (int, float)): value = value.unsqueeze(dim) tensor.scatter_(dim, index.unsqueeze(dim), value) def gumbel_softmax(logits, dim=-1, tau=1, hard=False, mask=None, eps=1e-10): """ Sample from the Gumbel-Softmax distribution and optionally discretize. Args: logits: [batch_size, n_class] unnormalized log-probs dim: along which dim the softmax is performed tau: non-negative scalar temperature hard: if True, take argmax, but differentiate w.r.t. soft sample y eps: eps Returns: [batch_size, n_class] sample from the Gumbel-Softmax distribution. 
If hard=True, then the returned sample will be one-hot, otherwise it will be a probability distribution that sums to 1 across classes Based on https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb , (MIT license) """ y_soft = _gumbel_softmax_sample(logits, tau=tau, eps=eps) if hard: with torch.no_grad(): _, k = y_soft.max(dim=dim) y_hard = torch.zeros_like(logits) y_hard.requires_grad = False set_index_one_hot_(y_hard, dim, k, 1.0) y = (y_hard - y_soft).detach() + y_soft else: y = y_soft return y def general_softmax(logits, dim=-1, tau=1, impl='standard', mask=None, training=False): impl = SoftmaxImplmentation.from_string(impl) if impl is SoftmaxImplmentation.STANDARD: return masked_softmax(logits / tau, dim=dim) elif impl in (SoftmaxImplmentation.GUMBEL, SoftmaxImplmentation.GUMBEL_HARD ): if not training: return greedy_softmax(logits, dim=dim, mask=mask) if impl is SoftmaxImplmentation.GUMBEL: return gumbel_softmax(logits, dim=dim, tau=tau, hard=False, mask=mask) else: return gumbel_softmax(logits, dim=dim, tau=tau, hard=True, mask =mask) class JacEnum(enum.Enum): """A customized enumeration class, adding helper functions for string-based argument parsing.""" @classmethod def from_string(cls, value): value = _canonize_enum_value(value) return cls(value) @classmethod def type_name(cls): return cls.__name__ @classmethod def choice_names(cls): return list(filter(lambda x: not x.startswith('_'), dir(cls))) @classmethod def choice_objs(cls): return [getattr(cls, name) for name in cls.choice_names()] @classmethod def choice_values(cls): return [getattr(cls, name).value for name in cls.choice_names()] @classmethod def is_valid(cls, value): value = _canonize_enum_value(value) return value in cls.choice_values() @classmethod def assert_valid(cls, value): assert cls.is_valid(value ), 'Invalid {}: "{}". Supported choices: {}.'.format(cls. type_name(), value, ','.join(cls.choice_values())) def __jsonify__(self): return self.value class SoftmaxImplmentation(JacEnum): STANDARD = 'standard' GUMBEL = 'gumbel' GUMBEL_HARD = 'gumbel_hard' class GeneralSoftmaxNew(nn.Module): def __init__(self, dim=-1, tau=1.0, impl='standard'): super().__init__() self.dim = dim self.tau = tau self.impl = SoftmaxImplmentation.from_string(impl) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ashutosh1919/neuro-symbolic-sudoku-solver
GeneralSoftmax
false
14,908
[ "Apache-2.0" ]
52
ecb4274ff66d3b6a86f64584e0a767bf785f107f
https://github.com/ashutosh1919/neuro-symbolic-sudoku-solver/tree/ecb4274ff66d3b6a86f64584e0a767bf785f107f
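Note that only the impl='standard' branch of general_softmax reaches the fused kernels in this record; the gumbel variants fall back to greedy one-hot selection outside training. A sketch exercising that eval-mode branch with the eager class from the record (CPU is fine, since no Triton path is taken here):

import torch

m = GeneralSoftmax(impl='gumbel')            # class from the record above
m.eval()                                     # training=False -> greedy_softmax
y = m(torch.rand(2, 5))
assert ((y == 0) | (y == 1)).all() and (y.sum(-1) == 1).all()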
TLU
import torch from torch import nn from torch.nn import Parameter from torch.nn.parameter import Parameter class TLU(nn.Module): def __init__(self, num_features): """max(y, tau) = max(y - tau, 0) + tau = ReLU(y - tau) + tau""" super(TLU, self).__init__() self.num_features = num_features self.tau = Parameter(torch.Tensor(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.zeros_(self.tau) def extra_repr(self): return 'num_features={num_features}'.format(**self.__dict__) def forward(self, x): return torch.max(x, self.tau.view(1, self.num_features, 1, 1)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torch.nn import Parameter from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_eq_gt_maximum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp0 == tmp1 tmp4 = tmp0 > tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr1 + x3, tmp3, xmask) tl.store(out_ptr2 + x3, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_gt_maximum_0[grid(256)](primals_2, primals_1, buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, buf1, buf2 class TLUNew(nn.Module): def __init__(self, num_features): """max(y, tau) = max(y - tau, 0) + tau = ReLU(y - tau) + tau""" super(TLUNew, self).__init__() self.num_features = num_features self.tau = Parameter(torch.Tensor(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.zeros_(self.tau) def extra_repr(self): return 'num_features={num_features}'.format(**self.__dict__) def forward(self, input_0): primals_1 = self.tau primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
asvk/fast-reid
TLU
false
14,909
[ "Apache-2.0" ]
71
cf246e9bee5b5e5d154de98ba0395b7a5d0d0ab7
https://github.com/asvk/fast-reid/tree/cf246e9bee5b5e5d154de98ba0395b7a5d0d0ab7
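The identity quoted in the TLU docstring, max(y, tau) = ReLU(y - tau) + tau, is easy to confirm numerically; a 2D analogue of the per-channel computation, with toy tensors of mine:

import torch

y = torch.randn(3, 4)
tau = torch.randn(4)
lhs = torch.max(y, tau.view(1, -1))          # per-channel max, as in TLU.forward
rhs = torch.relu(y - tau) + tau
assert torch.allclose(lhs, rhs)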
ResidualLinear
import torch import torch.nn as nn class ResidualLinear(nn.Module): def __init__(self, hidden_dim, norm1=None, norm2=None): super().__init__() self.linear1 = nn.Linear(hidden_dim, hidden_dim) self.norm1 = norm1 self.linear2 = nn.Linear(hidden_dim, hidden_dim) self.norm2 = norm2 self.relu = nn.ReLU(inplace=True) def forward(self, input): f = self.linear1(input) if self.norm1 is not None: f = self.norm1(f) f = self.relu(f) f = self.linear2(f) if self.norm2 is not None: f = self.norm2(f) f = f + input f = self.relu(f) return f def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_2[grid(256)](buf4, primals_5, primals_3, buf5, 256, XBLOCK=256, 
num_warps=4, num_stages=1) del primals_5 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, primals_4, buf6 class ResidualLinearNew(nn.Module): def __init__(self, hidden_dim, norm1=None, norm2=None): super().__init__() self.linear1 = nn.Linear(hidden_dim, hidden_dim) self.norm1 = norm1 self.linear2 = nn.Linear(hidden_dim, hidden_dim) self.norm2 = norm2 self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ashutosh1919/neuro-symbolic-sudoku-solver
ResidualLinear
false
14910
[ "Apache-2.0" ]
52
ecb4274ff66d3b6a86f64584e0a767bf785f107f
https://github.com/ashutosh1919/neuro-symbolic-sudoku-solver/tree/ecb4274ff66d3b6a86f64584e0a767bf785f107f
Conv1d_mp
import torch import torch.nn as nn class Conv1d_mp(nn.Module): def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size: 'int', stride: 'int'=1, padding: 'int'=1): super(Conv1d_mp, self).__init__() self._kernel_size = kernel_size self._stride = stride self._padding = padding self._ops = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding) self._activation = nn.ReLU() self._mp = nn.MaxPool1d(2, 2) def forward(self, x): return self._mp(self._activation(self._ops(x))) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 3), (12, 3, 1)) buf1 = reinterpret_tensor(buf0, (4, 3), (3, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 3), (3, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(12)](buf1, primals_2, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.int8) buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_1[grid(4)](buf1, buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) return reinterpret_tensor(buf3, (4, 1), (1, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf1, (4, 1, 3), (3, 3, 1), 0), buf2, buf4 class Conv1d_mpNew(nn.Module): def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size: 'int', stride: 'int'=1, padding: 'int'=1): super(Conv1d_mpNew, self).__init__() self._kernel_size = kernel_size self._stride = stride self._padding = padding self._ops = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding) self._activation = nn.ReLU() self._mp = nn.MaxPool1d(2, 2) def forward(self, input_0): primals_1 = self._ops.weight primals_2 = self._ops.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
atosystem/MIDI-BERT
Conv1d_mp
false
14911
[ "MIT" ]
109
61f7efb3be85a2a847e6585237036e052235a6a0
https://github.com/atosystem/MIDI-BERT/tree/61f7efb3be85a2a847e6585237036e052235a6a0
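A quick eager-mode sketch of the shape arithmetic behind the hard-coded asserts in the compiled call above; the only assumption is adding the batch dimension that reinterpret_tensor supplies:

import torch
import torch.nn as nn

# Conv1d: L_out = floor((L_in + 2*padding - kernel_size) / stride) + 1
#               = floor((4 + 2*1 - 4) / 1) + 1 = 3, matching assert_size_stride(buf0, (1, 4, 3), ...).
# MaxPool1d(2, 2): floor((3 - 2) / 2) + 1 = 1, matching the final (4, 1) result.
x = torch.rand(1, 4, 4)  # the (4, 4) input from get_inputs(), viewed with a batch dim
out = nn.MaxPool1d(2, 2)(torch.relu(nn.Conv1d(4, 4, 4, stride=1, padding=1)(x)))
print(out.shape)  # torch.Size([1, 4, 1])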
TripletLoss
import torch import torch.nn as nn import torch.nn.functional as F class TripletLoss(nn.Module): """ Triplet loss Takes embeddings of an anchor sample, a positive sample and a negative sample """ def __init__(self, margin=1.0): super(TripletLoss, self).__init__() self.margin = margin def forward(self, anchor, positive, negative, size_average=True): distance_positive = (anchor - positive).pow(2).sum(1).pow(0.5) distance_negative = (anchor - negative).pow(2).sum(1).pow(0.5) losses = F.relu(distance_positive - distance_negative + self.margin) return losses.mean() if size_average else losses.sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp20 = tl.load(in_ptr2 + (r0 + 64 * r1), None) tmp23 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None) tmp27 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None) tmp31 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp21 = tmp0 - tmp20 tmp22 = tmp21 * tmp21 tmp24 = tmp4 - tmp23 tmp25 = tmp24 * tmp24 tmp26 = tmp22 + tmp25 tmp28 = tmp9 - tmp27 tmp29 = tmp28 * tmp28 tmp30 = tmp26 + tmp29 tmp32 = tmp14 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp30 + tmp33 tmp35 = libdevice.sqrt(tmp34) tmp36 = tmp19 - tmp35 tmp37 = 1.0 tmp38 = tmp36 + tmp37 tmp39 = tl.full([1, 1], 0, tl.int32) tmp40 = triton_helpers.maximum(tmp39, tmp38) tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK]) tmp43 = tl.sum(tmp41, 1)[:, None] tmp44 = 64.0 tmp45 = tmp43 / tmp44 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp45, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class TripletLossNew(nn.Module): """ Triplet loss Takes embeddings of an anchor sample, a positive sample and a negative sample """ def __init__(self, margin=1.0): super(TripletLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
awesome-archive/CAIL2019
TripletLoss
false
14912
[ "MIT" ]
300
31e917752676ad77d247a47e04f17a8f9ea68721
https://github.com/awesome-archive/CAIL2019/tree/31e917752676ad77d247a47e04f17a8f9ea68721
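A minimal eager restatement of what the fused reduction kernel above computes, using the shapes from get_inputs() and the default margin of 1.0:

import torch
import torch.nn.functional as F

# Reducing dim=1 leaves 4*4*4 = 64 distance entries (.pow(0.5) is just sqrt);
# the kernel hinges them against the margin, sums, and divides by 64.0.
a, p, n = (torch.rand(4, 4, 4, 4) for _ in range(3))
d_pos = (a - p).pow(2).sum(1).sqrt()  # shape (4, 4, 4)
d_neg = (a - n).pow(2).sum(1).sqrt()
print(F.relu(d_pos - d_neg + 1.0).mean())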
TripletLoss_op
import torch import torch.nn as nn import torch.nn.functional as F class TripletLoss_op(nn.Module): def __init__(self, margin=1.0): super(TripletLoss_op, self).__init__() self.margin = margin def forward(self, op, anchor, positive, negative, size_average=True): distance_positive = (anchor - positive).pow(2).sum(1).pow(0.5) distance_negative = (anchor - negative).pow(2).sum(1).pow(0.5) losses = F.relu(op * (distance_positive - distance_negative) + self .margin) return losses.mean() if size_average else losses.sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp31 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp21 = tmp0 - tmp20 tmp22 = tmp21 * tmp21 tmp24 = tmp4 - tmp23 tmp25 = tmp24 * tmp24 tmp26 = tmp22 + tmp25 tmp28 = tmp9 - tmp27 tmp29 = tmp28 * tmp28 tmp30 = tmp26 + tmp29 tmp32 = tmp14 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp30 + tmp33 tmp35 = libdevice.sqrt(tmp34) tmp36 = tmp19 - tmp35 tl.store(out_ptr0 + x2, tmp36, xmask) @triton.jit def triton_per_fused_add_mean_mul_relu_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_mean_mul_relu_1[grid(1)](buf2, arg3_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg3_1 del buf0 return buf2, class TripletLoss_opNew(nn.Module): def 
__init__(self, margin=1.0): super(TripletLoss_opNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
awesome-archive/CAIL2019
TripletLoss_op
false
14913
[ "MIT" ]
300
31e917752676ad77d247a47e04f17a8f9ea68721
https://github.com/awesome-archive/CAIL2019/tree/31e917752676ad77d247a47e04f17a8f9ea68721
L2Norm
import torch from itertools import product as product from math import sqrt as sqrt import torch.nn as nn import torch.nn.init as init import torch.utils.data class L2Norm(nn.Module): def __init__(self, n_channels, scale): super(L2Norm, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant(self.weight, self.gamma) def forward(self, x): norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps x /= norm out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x ) * x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels': 4, 'scale': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from itertools import product as product from math import sqrt as sqrt import torch.nn as nn import torch.nn.init as init import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp16 * tmp15 tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp17, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_1, primals_2, buf0, buf1, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf1, buf0 class L2NormNew(nn.Module): def __init__(self, n_channels, scale): super(L2NormNew, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant(self.weight, self.gamma) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
avisekiit/adversarial_object_detection
L2Norm
false
14915
[ "MIT" ]
795
263f264b3f2bdb0f116ebbb30ec4a805f357b3a6
https://github.com/avisekiit/adversarial_object_detection/tree/263f264b3f2bdb0f116ebbb30ec4a805f357b3a6
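One caveat worth flagging: the eager forward uses `x /= norm`, so it mutates the caller's tensor in place, and the compiled call mirrors that by passing primals_1 as the kernel's third output pointer. (`init.constant` is also the deprecated spelling of `init.constant_`, kept here verbatim from the repo.) A small demonstration of the mutation:

import torch

x = torch.rand(4, 4, 4, 4)
x_before = x.clone()
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + 1e-10
x /= norm  # in-place: the caller's tensor is modified
print(torch.equal(x, x_before))  # False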
Atan
import torch import torch.nn as nn class Atan(nn.Module): def forward(self, x): return torch.atan(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_atan_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.atan(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_atan_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class AtanNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
Atan
false
14916
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
InverseSqrt
import torch import torch.nn as nn class InverseSqrt(nn.Module): def forward(self, x, alpha=1.0): return x / torch.sqrt(1.0 + alpha * x * x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 * tmp0 tmp4 = tmp3 + tmp1 tmp5 = libdevice.sqrt(tmp4) tmp6 = tmp0 / tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class InverseSqrtNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
InverseSqrt
false
14917
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
BipolarSigmoid
import torch import torch.nn as nn class BipolarSigmoid(nn.Module): def forward(self, x): return (1.0 - torch.exp(-x)) / (1.0 + torch.exp(-x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_exp_neg_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp5 = tmp2 + tmp3 tmp6 = tmp4 / tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_exp_neg_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class BipolarSigmoidNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
BipolarSigmoid
false
14918
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
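A side observation, not from the repo: the bipolar sigmoid (1 - e^-x) / (1 + e^-x) is mathematically identical to tanh(x / 2), which a one-line check confirms:

import torch

x = torch.linspace(-5, 5, steps=101)
bipolar = (1.0 - torch.exp(-x)) / (1.0 + torch.exp(-x))
print(torch.allclose(bipolar, torch.tanh(x / 2), atol=1e-6))  # True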
BertSelfOutput
from _paritybench_helpers import _mock_config import torch from torch import nn class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-12 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * 
tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_2 del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5, buf2, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2 class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfOutputNew(nn.Module): def __init__(self, config): super(BertSelfOutputNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
BIT-ENGD/eeqa
BertSelfOutput
false
14919
[ "MIT" ]
142
2995abbaff1fb47131246a247ee7ed62aa94f4c3
https://github.com/BIT-ENGD/eeqa/tree/2995abbaff1fb47131246a247ee7ed62aa94f4c3
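An equivalence note, not from the repo: the TF-style BertLayerNorm above (biased variance, epsilon inside the square root) computes the same thing as F.layer_norm over the last dimension:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
w, b = torch.ones(4), torch.zeros(4)
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)  # biased: divides by 4, not 3
manual = w * ((x - u) / torch.sqrt(s + 1e-12)) + b
print(torch.allclose(manual, F.layer_norm(x, (4,), w, b, eps=1e-12), atol=1e-6))  # True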
MaskedCrossEntropyCriterion
import torch import torch.nn as nn from torch.nn.modules.loss import _WeightedLoss class MaskedCrossEntropyCriterion(_WeightedLoss): def __init__(self, ignore_index=[-100], reduce=None): super(MaskedCrossEntropyCriterion, self).__init__() self.padding_idx = ignore_index self.reduce = reduce def forward(self, outputs, targets): lprobs = nn.functional.log_softmax(outputs, dim=-1) lprobs = lprobs.view(-1, lprobs.size(-1)) for idx in self.padding_idx: targets[targets == idx] = 0 nll_loss = -lprobs.gather(dim=-1, index=targets.unsqueeze(1)) if self.reduce: nll_loss = nll_loss.sum() return nll_loss.squeeze() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn.modules.loss import _WeightedLoss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_gather_index_put_lift_fresh_neg_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], -100, tl.int64) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], 0, tl.int64) tmp4 = tl.where(tmp2, tmp3, tmp0) tmp5 = tl.full([XBLOCK], 4, tl.int32) tmp6 = tmp4 + tmp5 tmp7 = tmp4 < 0 tmp8 = tl.where(tmp7, tmp6, tmp4) tl.device_assert((0 <= tmp8) & (tmp8 < 4) | ~xmask, 'index out of bounds: 0 <= tmp8 < 4') tmp10 = tl.load(in_ptr1 + (tmp8 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl_math.exp(tmp11) tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl_math.exp(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp10 - tmp22 tmp24 = -tmp23 tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp24, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused_gather_index_put_lift_fresh_neg_1[grid(4)](arg1_1, buf0, arg1_1, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg1_1 del buf0 return reinterpret_tensor(buf3, (4,), (1,), 0), class MaskedCrossEntropyCriterionNew(_WeightedLoss): def __init__(self, ignore_index=[-100], reduce=None): super(MaskedCrossEntropyCriterionNew, self).__init__() self.padding_idx = ignore_index self.reduce = reduce def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = 
input_1 output = call([arg0_1, arg1_1]) return output[0]
awesome-archive/inversecooking
MaskedCrossEntropyCriterion
false
14920
[ "MIT" ]
591
bd07fad6e2efb7ed3bf496f0e19913ed063b3729
https://github.com/awesome-archive/inversecooking/tree/bd07fad6e2efb7ed3bf496f0e19913ed063b3729
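An eager sketch of the gather in the kernel above, using the toy shapes from get_inputs(): picking the target column of log_softmax gives the per-sample NLL. Note that lprobs flattens to (64, 4) while only four targets exist, so only rows 0-3 are scored, which is why the kernel launches a grid of 4:

import torch
import torch.nn.functional as F

outputs = torch.rand(4, 4, 4, 4)
targets = torch.ones(4, dtype=torch.int64)
lprobs = F.log_softmax(outputs, dim=-1).view(-1, 4)  # (64, 4)
nll = -lprobs.gather(dim=-1, index=targets.unsqueeze(1)).squeeze()
print(torch.allclose(nll, F.nll_loss(lprobs[:4], targets, reduction='none')))  # True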
Sinc
import torch import torch.nn as nn class Sinc(nn.Module): def forward(self, x, epsilon=1e-09): return torch.sin(x + epsilon) / (x + epsilon) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1e-09 tmp2 = tmp0 + tmp1 tmp3 = tl_math.sin(tmp2) tmp4 = tmp3 / tmp2 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_sin_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SincNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
Sinc
false
14921
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
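Why the epsilon: sin(x)/x is 0/0 at x = 0 while its limit there is 1, so shifting by 1e-9 keeps the kernel finite without visibly changing the output:

import torch

x = torch.tensor([0.0, 1.0])
print(torch.sin(x + 1e-09) / (x + 1e-09))  # ~ tensor([1.0000, 0.8415])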
CAModel
import torch import torch.nn as nn import torch.nn.functional as F class CAModel(nn.Module): def __init__(self, env_d): super(CAModel, self).__init__() self.conv1 = nn.Conv2d(env_d * 3, 144, 1) self.conv2 = nn.Conv2d(144, env_d, 1) nn.init.zeros_(self.conv2.weight) nn.init.zeros_(self.conv2.bias) def forward(self, x): x = F.relu(self.conv1(x)) return self.conv2(x) def get_inputs(): return [torch.rand([4, 12, 64, 64])] def get_init_inputs(): return [[], {'env_d': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 48 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 12 y1 = yindex // 12 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 12 * x2 + 49152 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 144 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (144, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_2, (144,), (1,)) assert_size_stride(primals_3, (4, 12, 64, 64), (49152, 4096, 64, 1)) assert_size_stride(primals_4, (4, 144, 1, 1), (144, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12, 64, 64), (49152, 1, 768, 12), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(48, 4096)](primals_3, buf0, 48, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 144, 64, 64), (589824, 1, 9216, 144)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(2359296)](buf2, primals_2, 2359296, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 64, 64), (16384, 1, 256, 4)) buf4 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_2[grid(16, 4096)](buf3, primals_5, 
buf4, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf3 del primals_5 return buf4, primals_1, buf0, primals_4, buf2 class CAModelNew(nn.Module): def __init__(self, env_d): super(CAModelNew, self).__init__() self.conv1 = nn.Conv2d(env_d * 3, 144, 1) self.conv2 = nn.Conv2d(144, env_d, 1) nn.init.zeros_(self.conv2.weight) nn.init.zeros_(self.conv2.bias) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
anishau/Growing-Neural-Cellular-Automata-Pytorch
CAModel
false
14922
[ "Apache-2.0" ]
47
0e99815060ea4977597059fac5b556fe24e80dff
https://github.com/anishau/Growing-Neural-Cellular-Automata-Pytorch/tree/0e99815060ea4977597059fac5b556fe24e80dff
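Reading the stride patterns, the first and last kernels above appear to permute the activations to a channels-last layout around the two 1x1 convolutions and back to NCHW afterwards (the bias add is fused into the second permute). The layout change is value-preserving, as a small eager check suggests:

import torch
import torch.nn as nn

conv = nn.Conv2d(12, 144, 1)
x = torch.rand(4, 12, 64, 64)
x_cl = x.to(memory_format=torch.channels_last)  # same values, NHWC strides
print(torch.allclose(conv(x), conv(x_cl), atol=1e-5))  # True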
BentIdentity
import torch import torch.nn as nn class BentIdentity(nn.Module): def forward(self, x, alpha=1.0): return x + (torch.sqrt(1.0 + x * x) - 1.0) / 2.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = libdevice.sqrt(tmp3) tmp5 = tmp4 - tmp2 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tmp0 + tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sqrt_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class BentIdentityNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
BentIdentity
false
14923
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
LeCunTanh
import torch import torch.nn as nn class LeCunTanh(nn.Module): def forward(self, x): return 1.7159 * torch.tanh(2.0 / 3 * x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.6666666666666666 tmp2 = tmp0 * tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = 1.7159 tmp5 = tmp3 * tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class LeCunTanhNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
LeCunTanh
false
14924
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
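Background on the constants, which I believe trace to LeCun et al., "Efficient BackProp": 1.7159 * tanh(2x/3) is scaled so that f(±1) ≈ ±1, keeping unit targets inside the activation's quasi-linear range:

import torch

x = torch.tensor([1.0, -1.0])
print(1.7159 * torch.tanh(2.0 / 3 * x))  # ~ tensor([ 1.0000, -1.0000])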
Gaussian
import torch import torch.nn as nn class Gaussian(nn.Module): def forward(self, x): return torch.exp(-x * x / 2.0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -tmp0 tmp2 = tmp1 * tmp0 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tl_math.exp(tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_mul_neg_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GaussianNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
Gaussian
false
14925
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
CosReLU
import torch import torch.nn as nn class CosReLU(nn.Module): def forward(self, x): return torch.cos(x) + torch.relu(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_cos_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.cos(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp0) tmp4 = tmp1 + tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_cos_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class CosReLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
CosReLU
false
14926
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
LogLog
import torch import torch.nn as nn class LogLog(nn.Module): def forward(self, x): return 1.0 - torch.exp(-torch.exp(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_neg_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tmp2 = -tmp1 tmp3 = tl_math.exp(tmp2) tmp4 = 1.0 tmp5 = tmp4 - tmp3 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_neg_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class LogLogNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
LogLog
false
14927
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
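A side note, not from the repo: 1 - exp(-exp(x)) is the inverse of the complementary log-log link, so applying cloglog to the output recovers x up to float32 rounding:

import torch

x = torch.linspace(-3.0, 2.0, steps=11)
y = 1.0 - torch.exp(-torch.exp(x))
print(torch.allclose(torch.log(-torch.log(1.0 - y)), x, atol=1e-4))  # True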
SinReLU
import torch import torch.nn as nn class SinReLU(nn.Module): def forward(self, x): return torch.sin(x) + torch.relu(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_relu_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.sin(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp0) tmp4 = tmp1 + tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_relu_sin_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SinReLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
awlange/pysurvival
SinReLU
false
14928
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
MLP
import torch import torch.nn as nn import torch.nn.functional as F class MLP(nn.Module): def __init__(self, num_classes, n_1, n_2): super(MLP, self).__init__() self.fc1 = nn.Linear(784, n_1) self.fc2 = nn.Linear(n_1, n_2) self.fc3 = nn.Linear(n_2, num_classes) def forward(self, din): din = din.view(-1, 28 * 28) dout = F.relu(self.fc1(din)) dout = F.relu(self.fc2(dout)) return self.fc3(dout) def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {'num_classes': 4, 'n_1': 4, 'n_2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (4, 784), (784, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 4), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(16)](buf3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, primals_1, buf1, buf3, primals_6, primals_4 class MLPNew(nn.Module): def __init__(self, num_classes, n_1, n_2): super(MLPNew, self).__init__() self.fc1 = nn.Linear(784, n_1) self.fc2 = nn.Linear(n_1, n_2) self.fc3 = nn.Linear(n_2, num_classes) def forward(self, input_0): primals_1 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
awslabs/adatune
MLP
false
14929
[ "Apache-2.0" ]
266
aecbc498f4545f038c71252e085c2e70a35941c7
https://github.com/awslabs/adatune/tree/aecbc498f4545f038c71252e085c2e70a35941c7
BartClassificationHead
import torch import torch.utils.data from torch import nn class BartClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim, inner_dim, num_classes, pooler_dropout): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, x): x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'inner_dim': 4, 'num_classes': 4, 'pooler_dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4 class BartClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim, inner_dim, num_classes, pooler_dropout): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
awslabs/gap-text2sql
BartClassificationHead
false
14,930
[ "Apache-2.0" ]
75
83af3f08a6c108f7cbacb8125e2a7ec9255c81b0
https://github.com/awslabs/gap-text2sql/tree/83af3f08a6c108f7cbacb8125e2a7ec9255c81b0
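For context, a minimal usage sketch (an editor's addition, not a field of this dataset row; variable names are illustrative). The shapes follow the record's get_inputs()/get_init_inputs(), and the compiled call() contains no dropout, so it corresponds to the eager module in eval mode:

import torch

# Eager module from the record; .eval() disables the two dropout layers.
head = BartClassificationHead(input_dim=4, inner_dim=4, num_classes=4,
    pooler_dropout=0.5).eval()
x = torch.rand([4, 4, 4, 4])
print(head(x).shape)  # torch.Size([4, 4, 4, 4])

# The Triton-backed variant assumes a CUDA device, since call() allocates
# CUDA buffers and launches the fused tanh kernel.
if torch.cuda.is_available():
    head_new = BartClassificationHeadNew(4, 4, 4, 0.5).cuda()
    print(head_new(x.cuda()).shape)  # torch.Size([4, 4, 4, 4])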
CNN
import torch
import torch.nn as nn
import torch.nn.functional as F


class CNN(nn.Module):

    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = x.view(-1, 1, 28, 28)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.softmax(x, dim=1)


def get_inputs():
    return [torch.rand([4, 1, 28, 28])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 23040
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 576 % 10
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 5760
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 12
    x3 = xindex // 12
    x2 = xindex // 1440
    x4 = xindex % 1440
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x3), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x3), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x3), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tl.store(out_ptr0 + (x4 + 1536 * x2), tmp15, xmask)
    tl.store(out_ptr1 + x5, tmp18, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 5120
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 64 % 20
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(
        in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1280
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp19 = 0.0
    tmp20 = tmp18 <= tmp19
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp18, xmask)
    tl.store(out_ptr2 + x2, tmp20, xmask)


@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 50
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    rnumel = 10
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(rmask & xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
    assert_size_stride(primals_2, (10, 1, 5, 5), (25, 25, 5, 1))
    assert_size_stride(primals_3, (10,), (1,))
    assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
    assert_size_stride(primals_5, (20,), (1,))
    assert_size_stride(primals_6, (50, 320), (320, 1))
    assert_size_stride(primals_7, (50,), (1,))
    assert_size_stride(primals_8, (10, 50), (50, 1))
    assert_size_stride(primals_9, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 10, 24, 24), (5760, 576, 24, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(23040)](buf1, primals_3, 23040,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 10, 12, 12), (1536, 144, 12, 1),
            torch.int8)
        buf3 = empty_strided_cuda((4, 10, 12, 12), (1440, 144, 12, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_relu_1[grid(5760)](buf1,
            buf2, buf3, 5760, XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 20, 8, 8), (1280, 64, 8, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_2[grid(5120)](buf5, primals_5, 5120,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.bool)
        triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[
            grid(1280)](buf5, buf6, buf7, buf14, 1280, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf8 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (4, 320), (320, 1), 0),
            reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf8)
        buf9 = buf8
        del buf8
        triton_poi_fused_relu_4[grid(200)](buf9, primals_7, 200, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_7
        buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
            (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf10)
        del primals_9
        buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        triton_per_fused__softmax_5[grid(4)](buf10, buf13, 4, 10, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf10
    return (buf13, primals_2, primals_4, primals_1, buf1, buf2, buf3, buf5,
        buf6, reinterpret_tensor(buf7, (4, 320), (320, 1), 0), buf9, buf13,
        primals_8, primals_6, buf14)


class CNNNew(nn.Module):

    def __init__(self):
        super(CNNNew, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.fc1.weight
        primals_7 = self.fc1.bias
        primals_8 = self.fc2.weight
        primals_9 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
awesome-archive/DeepLearningWithPyTorch
CNN
false
14,931
[ "MIT" ]
85
921e3c1bc33f88e2b749dd1f9dac8a414bd4a1ee
https://github.com/awesome-archive/DeepLearningWithPyTorch/tree/921e3c1bc33f88e2b749dd1f9dac8a414bd4a1ee
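A minimal usage sketch for this record (editor's addition, not part of the row; variable names are illustrative). The forward reshapes any input to (-1, 1, 28, 28), and the final F.softmax(dim=1) makes each output row a probability vector:

import torch

model = CNN().eval()  # .eval() disables Dropout2d and F.dropout
x = torch.rand([4, 1, 28, 28])
probs = model(x)
print(probs.shape)       # torch.Size([4, 10])
print(probs.sum(dim=1))  # each entry is ~1.0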
MINCNet
import torch
import torch.utils.data
import torch.nn as nn


class MINCNet(nn.Module):

    def __init__(self):
        super(MINCNet, self).__init__()
        self.ReLU = nn.ReLU(True)
        self.conv11 = nn.Conv2d(3, 64, 3, 1, 1)
        self.conv12 = nn.Conv2d(64, 64, 3, 1, 1)
        self.maxpool1 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv21 = nn.Conv2d(64, 128, 3, 1, 1)
        self.conv22 = nn.Conv2d(128, 128, 3, 1, 1)
        self.maxpool2 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv31 = nn.Conv2d(128, 256, 3, 1, 1)
        self.conv32 = nn.Conv2d(256, 256, 3, 1, 1)
        self.conv33 = nn.Conv2d(256, 256, 3, 1, 1)
        self.maxpool3 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv41 = nn.Conv2d(256, 512, 3, 1, 1)
        self.conv42 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv43 = nn.Conv2d(512, 512, 3, 1, 1)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv51 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv52 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv53 = nn.Conv2d(512, 512, 3, 1, 1)

    def forward(self, x):
        out = self.ReLU(self.conv11(x))
        out = self.ReLU(self.conv12(out))
        out = self.maxpool1(out)
        out = self.ReLU(self.conv21(out))
        out = self.ReLU(self.conv22(out))
        out = self.maxpool2(out)
        out = self.ReLU(self.conv31(out))
        out = self.ReLU(self.conv32(out))
        out = self.ReLU(self.conv33(out))
        out = self.maxpool3(out)
        out = self.ReLU(self.conv41(out))
        out = self.ReLU(self.conv42(out))
        out = self.ReLU(self.conv43(out))
        out = self.maxpool4(out)
        out = self.ReLU(self.conv51(out))
        out = self.ReLU(self.conv52(out))
        out = self.conv53(out)
        return out


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    ynumel = 192
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = xindex // 64 % 32
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = xindex // 128 % 16
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 256
    x1 = xindex // 256 % 8
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_16(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 512
    x1 = xindex // 512 % 4
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4608 + x0 + 1024 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_18(in_ptr0, in_ptr1, out_ptr0, ynumel,
        xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 8192 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27) = args
    args.clear()
    assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (128,), (1,))
    assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_11, (256,), (1,))
    assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_13, (256,), (1,))
    assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_15, (256,), (1,))
    assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (512,), (1,))
    assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_19, (512,), (1,))
    assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_21, (512,), (1,))
    assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_23, (512,), (1,))
    assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_25, (512,), (1,))
    assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_27, (512,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16,
            YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3),
            torch.float32)
        triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64),
            torch.float32)
        triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64),
            torch.float32)
        triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
            torch.float32)
        triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_10
        buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_12
        buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_14
        buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_16
        buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_18
        buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_20
        buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_22
        buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_24
        buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
            torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_26
        buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf15 = buf14
        del buf14
        triton_poi_fused_convolution_relu_9[grid(1048576)](buf15,
            primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf17 = buf16
        del buf16
        triton_poi_fused_convolution_relu_9[grid(1048576)](buf17,
            primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
            torch.float32)
        buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17,
            buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf21 = buf20
        del buf20
        triton_poi_fused_convolution_relu_11[grid(524288)](buf21,
            primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf23 = buf22
        del buf22
        triton_poi_fused_convolution_relu_11[grid(524288)](buf23,
            primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
            torch.float32)
        buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23,
            buf24, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf27 = buf26
        del buf26
        triton_poi_fused_convolution_relu_13[grid(262144)](buf27,
            primals_11, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_11
        buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf29 = buf28
        del buf28
        triton_poi_fused_convolution_relu_13[grid(262144)](buf29,
            primals_13, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_13
        buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf31 = buf30
        del buf30
        triton_poi_fused_convolution_relu_13[grid(262144)](buf31,
            primals_15, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_15
        buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
            torch.float32)
        buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31,
            buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf35 = buf34
        del buf34
        triton_poi_fused_convolution_relu_15[grid(131072)](buf35,
            primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_17
        buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf37 = buf36
        del buf36
        triton_poi_fused_convolution_relu_15[grid(131072)](buf37,
            primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_19
        buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf39 = buf38
        del buf38
        triton_poi_fused_convolution_relu_15[grid(131072)](buf39,
            primals_21, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_21
        buf40 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.float32)
        buf41 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_16[grid(32768)](buf39,
            buf40, buf41, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        buf42 = extern_kernels.convolution(buf40, buf11, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf42, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf43 = buf42
        del buf42
        triton_poi_fused_convolution_relu_17[grid(32768)](buf43,
            primals_23, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_23
        buf44 = extern_kernels.convolution(buf43, buf12, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf44, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf45 = buf44
        del buf44
        triton_poi_fused_convolution_relu_17[grid(32768)](buf45,
            primals_25, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_25
        buf46 = extern_kernels.convolution(buf45, buf13, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf46, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf47 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1),
            torch.float32)
        triton_poi_fused_convolution_18[grid(2048, 16)](buf46, primals_27,
            buf47, 2048, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del buf46
        del primals_27
    return (buf47, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
        buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19,
        buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33,
        buf35, buf37, buf39, buf40, buf41, buf43, buf45)


class MINCNetNew(nn.Module):

    def __init__(self):
        super(MINCNetNew, self).__init__()
        self.ReLU = nn.ReLU(True)
        self.conv11 = nn.Conv2d(3, 64, 3, 1, 1)
        self.conv12 = nn.Conv2d(64, 64, 3, 1, 1)
        self.maxpool1 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv21 = nn.Conv2d(64, 128, 3, 1, 1)
        self.conv22 = nn.Conv2d(128, 128, 3, 1, 1)
        self.maxpool2 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv31 = nn.Conv2d(128, 256, 3, 1, 1)
        self.conv32 = nn.Conv2d(256, 256, 3, 1, 1)
        self.conv33 = nn.Conv2d(256, 256, 3, 1, 1)
        self.maxpool3 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv41 = nn.Conv2d(256, 512, 3, 1, 1)
        self.conv42 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv43 = nn.Conv2d(512, 512, 3, 1, 1)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
        self.conv51 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv52 = nn.Conv2d(512, 512, 3, 1, 1)
        self.conv53 = nn.Conv2d(512, 512, 3, 1, 1)

    def forward(self, input_0):
        primals_1 = self.conv11.weight
        primals_2 = self.conv11.bias
        primals_4 = self.conv12.weight
        primals_5 = self.conv12.bias
        primals_6 = self.conv21.weight
        primals_7 = self.conv21.bias
        primals_8 = self.conv22.weight
        primals_9 = self.conv22.bias
        primals_10 = self.conv31.weight
        primals_11 = self.conv31.bias
        primals_12 = self.conv32.weight
        primals_13 = self.conv32.bias
        primals_14 = self.conv33.weight
        primals_15 = self.conv33.bias
        primals_16 = self.conv41.weight
        primals_17 = self.conv41.bias
        primals_18 = self.conv42.weight
        primals_19 = self.conv42.bias
        primals_20 = self.conv43.weight
        primals_21 = self.conv43.bias
        primals_22 = self.conv51.weight
        primals_23 = self.conv51.bias
        primals_24 = self.conv52.weight
        primals_25 = self.conv52.bias
        primals_26 = self.conv53.weight
        primals_27 = self.conv53.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27])
        return output[0]
arthur-qiu/BasicSR
MINCNet
false
14,932
[ "Apache-2.0" ]
106
2e5f131edfc2adf912a1ed3b8c818a63d590a282
https://github.com/arthur-qiu/BasicSR/tree/2e5f131edfc2adf912a1ed3b8c818a63d590a282
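A minimal usage sketch for this record (editor's addition, not part of the row). MINCNet is a VGG-style backbone: the four ceil-mode 2x2 max pools divide the spatial size by 16 while the 3x3 convolutions (padding 1) preserve it, so the 64x64 test input yields a 512-channel 4x4 feature map:

import torch

net = MINCNet().eval()
feats = net(torch.rand([4, 3, 64, 64]))
print(feats.shape)  # torch.Size([4, 512, 4, 4])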
BertLayerNorm
from torch.nn import Module
import torch
import torch.nn as nn


class BertLayerNorm(Module):

    def __init__(self, hidden_size, eps=1e-12):
        super(BertLayerNorm, self).__init__()
        self.shape = torch.Size((hidden_size,))
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = x - u
        s = s * s
        s = s.mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight * x + self.bias
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = 4.0
    tmp14 = tmp12 / tmp13
    tmp15 = 1e-12
    tmp16 = tmp14 + tmp15
    tmp17 = libdevice.sqrt(tmp16)
    tmp18 = tmp1 / tmp17
    tmp19 = tmp0 * tmp18
    tmp21 = tmp19 + tmp20
    tl.store(out_ptr0 + x2, tmp21, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_sqrt_1[grid(256)](primals_2,
            buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        del primals_3
    return buf1, primals_1


class BertLayerNormNew(Module):

    def __init__(self, hidden_size, eps=1e-12):
        super(BertLayerNormNew, self).__init__()
        self.shape = torch.Size((hidden_size,))
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))

    def forward(self, input_0):
        primals_2 = self.weight
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
axiserr/Hetu
BertLayerNorm
false
14,933
[ "Apache-2.0" ]
82
0052f727488db0570d6b37f63549b43b0920bc29
https://github.com/axiserr/Hetu/tree/0052f727488db0570d6b37f63549b43b0920bc29
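A quick numerical check for this record (editor's addition, not part of the row). Because the module uses the biased variance (the mean of squared deviations), it matches torch.nn.functional.layer_norm with the same eps:

import torch
import torch.nn.functional as F

ln = BertLayerNorm(hidden_size=4)
x = torch.rand([4, 4, 4, 4])
ref = F.layer_norm(x, (4,), ln.weight, ln.bias, eps=1e-12)
print(torch.allclose(ln(x), ref, atol=1e-6))  # True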
Softmax
import torch
import torch.nn as nn


class Softmax(nn.Module):

    def forward(self, x):
        y = torch.exp(x)
        return y / torch.sum(y, dim=0)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_exp_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
    tmp1 = tl_math.exp(tmp0)
    tmp3 = tl_math.exp(tmp2)
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp3 + tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp9 = tmp6 + tmp8
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp9 + tmp11
    tmp13 = tmp1 / tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_exp_sum_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SoftmaxNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
awlange/pysurvival
Softmax
false
14,934
[ "Apache-2.0" ]
242
841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
https://github.com/awlange/pysurvival/tree/841b9bc6ce700ba8898d2a1488aa9cd25ee7a8e6
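A minimal usage sketch for this record (editor's addition, not part of the row). Note that normalization runs over dim=0 rather than the last dimension, and that neither the eager module nor the fused kernel subtracts the maximum before exponentiating:

import torch

sm = Softmax()
y = sm(torch.rand([4, 4, 4, 4]))
print(y.sum(dim=0)[0, 0])  # tensor([1., 1., 1., 1.])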
LearnedPositionalEmbedding
import torch
import torch.utils.data
from torch import nn


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """ Replace non-padding symbols with their position numbers. Position numbers
    begin at padding_idx+1. Padding symbols are ignored. This is modified from
    fairseq's `utils.make_positions`.

    :param torch.Tensor x:
    :return torch.Tensor:
    """
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental_indices.long() + padding_idx


class LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    Padding ids are ignored by either offsetting based on padding_idx
    or by setting padding_idx to None and ensuring that the appropriate
    position ids are passed to the forward function.
    """

    def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
            padding_idx: 'int'):
        assert padding_idx is not None
        num_embeddings += padding_idx + 1
        super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)

    def forward(self, input, use_cache=False):
        """Input is expected to be of size [bsz x seqlen]."""
        if use_cache:
            pos = int(self.padding_idx + input.size(1))
            positions = input.data.new(1, 1).fill_(pos)
        else:
            positions = create_position_ids_from_input_ids(input, self.padding_idx)
        return super().forward(positions), positions


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_embeddings': 4, 'embedding_dim': 4, 'padding_idx': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
    tmp0 = arg0_0 + arg1_0
    return tmp0


@triton.jit
def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 16
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
    tmp1 = 4.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.int32)
    tmp4 = tmp3.to(tl.int64)
    tmp5 = tmp4.to(tl.int64)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0)
    tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp7, xmask)


@triton.jit
def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0.to(tl.int32)
    tmp3 = 4.0
    tmp4 = tmp2 != tmp3
    tmp5 = tmp4.to(tl.int32)
    tmp6 = tmp1 * tmp5
    tmp7 = tmp6.to(tl.int64)
    tmp8 = tl.full([1], 4, tl.int64)
    tmp9 = tmp7 + tmp8
    tl.store(in_out_ptr0 + x0, tmp9, xmask)


@triton.jit
def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 9, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert((0 <= tmp4) & (tmp4 < 9) | ~xmask,
        'index out of bounds: 0 <= tmp4 < 9')
    tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (9, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
        get_raw_stream(0)
        triton_per_fused__to_copy_cumsum_ne_0[grid(64)](primals_1, buf0,
            64, 4, XBLOCK=1, num_warps=2, num_stages=1)
        buf1 = buf0
        del buf0
        triton_poi_fused__to_copy_add_mul_ne_1[grid(256)](buf1, primals_1,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_embedding_2[grid(1024)](buf1, primals_2, buf2,
            1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf2, buf1, buf1


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """ Replace non-padding symbols with their position numbers. Position numbers
    begin at padding_idx+1. Padding symbols are ignored. This is modified from
    fairseq's `utils.make_positions`.

    :param torch.Tensor x:
    :return torch.Tensor:
    """
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental_indices.long() + padding_idx


class LearnedPositionalEmbeddingNew(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    Padding ids are ignored by either offsetting based on padding_idx
    or by setting padding_idx to None and ensuring that the appropriate
    position ids are passed to the forward function.
    """

    def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
            padding_idx: 'int'):
        assert padding_idx is not None
        num_embeddings += padding_idx + 1
        super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0], output[1]
awslabs/gap-text2sql
LearnedPositionalEmbedding
false
14,935
[ "Apache-2.0" ]
75
83af3f08a6c108f7cbacb8125e2a7ec9255c81b0
https://github.com/awslabs/gap-text2sql/tree/83af3f08a6c108f7cbacb8125e2a7ec9255c81b0
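A minimal usage sketch for this record (editor's addition, not part of the row; the integer ids are illustrative). With padding_idx=4, non-padding positions count up from 5 and padding symbols keep the padding index, whose embedding row is zero:

import torch

emb = LearnedPositionalEmbedding(num_embeddings=4, embedding_dim=4,
    padding_idx=4)  # internally allocates 4 + 4 + 1 = 9 rows
ids = torch.tensor([[5, 6, 4, 7]])  # 4 is the padding symbol
out, positions = emb(ids)
print(positions)  # tensor([[5, 6, 4, 7]])
print(out.shape)  # torch.Size([1, 4, 4])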
LinearActivation
from torch.nn import Module
import torch
import torch.nn as nn


class LinearActivation(Module):

    def __init__(self, in_features, out_features, act='gelu', bias=True):
        super(LinearActivation, self).__init__()
        self.Linear = nn.Linear(in_features, out_features, bias=bias)
        if act == 'relu':
            self.act_fn = nn.ReLU()
        elif act == 'tanh':
            self.act_fn = nn.Tanh()
        elif act == 'gelu':
            self.act_fn = nn.GELU()

    def forward(self, x):
        x = self.Linear(x)
        x = self.act_fn(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0


class LinearActivationNew(Module):

    def __init__(self, in_features, out_features, act='gelu', bias=True):
        super(LinearActivationNew, self).__init__()
        self.Linear = nn.Linear(in_features, out_features, bias=bias)
        if act == 'relu':
            self.act_fn = nn.ReLU()
        elif act == 'tanh':
            self.act_fn = nn.Tanh()
        elif act == 'gelu':
            self.act_fn = nn.GELU()

    def forward(self, input_0):
        primals_1 = self.Linear.weight
        primals_2 = self.Linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
axiserr/Hetu
LinearActivation
false
14,936
[ "Apache-2.0" ]
82
0052f727488db0570d6b37f63549b43b0920bc29
https://github.com/axiserr/Hetu/tree/0052f727488db0570d6b37f63549b43b0920bc29
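A minimal usage sketch for this record (editor's addition, not part of the row). The fused kernel computes the exact erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))), which is what nn.GELU() uses by default:

import torch

layer = LinearActivation(in_features=4, out_features=4, act='gelu')
x = torch.rand([4, 4, 4, 4])
print(layer(x).shape)  # torch.Size([4, 4, 4, 4])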
BertSelfAttention
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn.functional as F
import torch.nn as nn


class BertSelfAttention(nn.Module):

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.
            num_attention_heads)
        self.all_head_size = (self.num_attention_heads * self.
            attention_head_size)
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = torch.reshape(x, new_x_shape)
        return x.permute(0, 2, 1, 3)

    def transpose_key_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = torch.reshape(x, new_x_shape)
        return x.permute(0, 2, 3, 1)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_key_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer)
        attention_scores = attention_scores / math.sqrt(self.
            attention_head_size)
        attention_scores = attention_scores + attention_mask
        attention_probs = F.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.
            all_head_size,)
        context_layer = torch.reshape(context_layer, new_context_layer_shape)
        return context_layer


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4,
        num_attention_heads=4, attention_probs_dropout_prob=0.5)}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)


@triton.jit
def triton_poi_fused__softmax_add_div_1(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp5 * tmp1
    tmp8 = tmp6 + tmp7
    tmp9 = triton_helpers.maximum(tmp4, tmp8)
    tmp11 = tmp10 * tmp1
    tmp13 = tmp11 + tmp12
    tmp14 = triton_helpers.maximum(tmp9, tmp13)
    tmp16 = tmp15 * tmp1
    tmp18 = tmp16 + tmp17
    tmp19 = triton_helpers.maximum(tmp14, tmp18)
    tmp20 = tmp4 - tmp19
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp8 - tmp19
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp19
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp24 + tmp26
    tmp28 = tmp18 - tmp19
    tmp29 = tl_math.exp(tmp28)
    tmp30 = tmp27 + tmp29
    tl.store(out_ptr0 + x2, tmp19, xmask)
    tl.store(out_ptr1 + x2, tmp30, xmask)


@triton.jit
def triton_poi_fused__softmax_add_div_2(in_out_ptr0, in_ptr0, in_ptr1,
        in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex % 64
    x5 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 / tmp8
    tl.store(in_out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
        tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
        del primals_6
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
        buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf0
        triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
        buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf1
        buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_add_div_1[grid(64)](buf5, primals_8,
            buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf5
        triton_poi_fused__softmax_add_div_2[grid(256)](buf8, primals_8,
            buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_8
        buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf7
        triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_7
        buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
        buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf6
        triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        del buf10
    return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
        ), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)


class BertSelfAttentionNew(nn.Module):

    def __init__(self, config):
        super(BertSelfAttentionNew, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.
            num_attention_heads)
        self.all_head_size = (self.num_attention_heads * self.
            attention_head_size)
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = torch.reshape(x, new_x_shape)
        return x.permute(0, 2, 1, 3)

    def transpose_key_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = torch.reshape(x, new_x_shape)
        return x.permute(0, 2, 3, 1)

    def forward(self, input_0, input_1):
        primals_1 = self.query.weight
        primals_2 = self.query.bias
        primals_4 = self.key.weight
        primals_5 = self.key.bias
        primals_6 = self.value.weight
        primals_7 = self.value.bias
        primals_3 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
axiserr/Hetu
BertSelfAttention
false
14937
[ "Apache-2.0" ]
82
0052f727488db0570d6b37f63549b43b0920bc29
https://github.com/axiserr/Hetu/tree/0052f727488db0570d6b37f63549b43b0920bc29
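# Minimal usage sketch for the BertSelfAttention module defined above, assuming the
# class and a SimpleNamespace-style config are in scope; the Triton-backed
# BertSelfAttentionNew path additionally needs a CUDA device, so only the eager
# PyTorch module is exercised here.
import types
import torch

config = types.SimpleNamespace(hidden_size=4, num_attention_heads=4,
                               attention_probs_dropout_prob=0.5)
attn = BertSelfAttention(config).eval()   # eval() disables dropout for a deterministic pass
hidden_states = torch.rand(4, 4, 4)       # (batch, seq_len, hidden_size), as in get_inputs()
attention_mask = torch.zeros(4, 4, 4)     # additive mask; zeros leave the scores unchanged
with torch.no_grad():
    context = attn(hidden_states, attention_mask)
print(context.shape)                      # torch.Size([4, 4, 4])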
BertOutput
from _paritybench_helpers import _mock_config from torch.nn import Module import torch import torch.nn as nn class BertLayerNorm(Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() self.shape = torch.Size((hidden_size,)) self.eps = eps self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): u = x.mean(-1, keepdim=True) s = x - u s = s * s s = s.mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight * x + self.bias return x class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-12 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / 
tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_2 del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_sqrt_2[grid(256)](primals_5, buf2, primals_6, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2 class BertLayerNorm(Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() self.shape = torch.Size((hidden_size,)) self.eps = eps self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): u = x.mean(-1, keepdim=True) s = x - u s = s * s s = s.mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight * x + self.bias return x class BertOutputNew(nn.Module): def __init__(self, config): super(BertOutputNew, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
axiserr/Hetu
BertOutput
false
14938
[ "Apache-2.0" ]
82
0052f727488db0570d6b37f63549b43b0920bc29
https://github.com/axiserr/Hetu/tree/0052f727488db0570d6b37f63549b43b0920bc29
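# Minimal usage sketch for the BertOutput block defined above, assuming the
# BertLayerNorm/BertOutput definitions are in scope; BertOutputNew's compiled
# call() requires CUDA, so the eager module is shown.
import types
import torch

config = types.SimpleNamespace(intermediate_size=4, hidden_size=4,
                               hidden_dropout_prob=0.5)
layer = BertOutput(config).eval()          # eval() turns the dropout off
hidden_states = torch.rand(4, 4, 4, 4)     # shapes taken from get_inputs() above
input_tensor = torch.rand(4, 4, 4, 4)
with torch.no_grad():
    out = layer(hidden_states, input_tensor)
# With the default weight=1, bias=0, each vector along the last dim is normalised,
# so its mean is ~0.
print(out.shape, out.mean(-1).abs().max())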
NIN
import string import torch import numpy as np import torch.utils.data import torch import torch.nn as nn def _einsum(a, b, c, x, y): einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c)) return torch.einsum(einsum_str, x, y) def contract_inner(x, y): """tensordot(x, y, 1).""" x_chars = list(string.ascii_lowercase[:len(x.shape)]) y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x .shape)]) y_chars[0] = x_chars[-1] out_chars = x_chars[:-1] + y_chars[1:] return _einsum(x_chars, y_chars, out_chars, x, y) def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0, dtype=torch.float32, device='cpu'): def _compute_fans(shape, in_axis=1, out_axis=0): receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis ] fan_in = shape[in_axis] * receptive_field_size fan_out = shape[out_axis] * receptive_field_size return fan_in, fan_out def init(shape, dtype=dtype, device=device): fan_in, fan_out = _compute_fans(shape, in_axis, out_axis) if mode == 'fan_in': denominator = fan_in elif mode == 'fan_out': denominator = fan_out elif mode == 'fan_avg': denominator = (fan_in + fan_out) / 2 else: raise ValueError( 'invalid mode for variance scaling initializer: {}'.format( mode)) variance = scale / denominator if distribution == 'normal': return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt( variance) elif distribution == 'uniform': return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0 ) * np.sqrt(3 * variance) else: raise ValueError( 'invalid distribution for variance scaling initializer') return init def default_init(scale=1.0): """The same initialization used in DDPM.""" scale = 1e-10 if scale == 0 else scale return variance_scaling(scale, 'fan_avg', 'uniform') class NIN(nn.Module): def __init__(self, in_dim, num_units, init_scale=0.1): super().__init__() self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True) self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True) def forward(self, x): x = x.permute(0, 2, 3, 1) y = contract_inner(x, self.W) + self.b return y.permute(0, 3, 1, 2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'num_units': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import string import numpy as np import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (1, 64, 4), (0, 4, 1), 0), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 1, 16, 4), 0 ), reinterpret_tensor(buf0, (1, 4, 64), (256, 1, 4), 0) def _einsum(a, b, c, x, y): einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c)) return torch.einsum(einsum_str, x, y) def contract_inner(x, y): """tensordot(x, y, 1).""" x_chars = list(string.ascii_lowercase[:len(x.shape)]) y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x .shape)]) y_chars[0] = x_chars[-1] out_chars = x_chars[:-1] + y_chars[1:] return _einsum(x_chars, y_chars, out_chars, x, y) def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0, dtype=torch.float32, device='cpu'): def _compute_fans(shape, in_axis=1, out_axis=0): receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis ] fan_in = shape[in_axis] * receptive_field_size fan_out = shape[out_axis] * receptive_field_size return fan_in, fan_out def init(shape, dtype=dtype, device=device): fan_in, fan_out = _compute_fans(shape, in_axis, out_axis) if mode == 'fan_in': denominator = fan_in elif mode == 'fan_out': denominator = fan_out elif mode == 'fan_avg': denominator = (fan_in + fan_out) / 2 
else: raise ValueError( 'invalid mode for variance scaling initializer: {}'.format( mode)) variance = scale / denominator if distribution == 'normal': return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt( variance) elif distribution == 'uniform': return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0 ) * np.sqrt(3 * variance) else: raise ValueError( 'invalid distribution for variance scaling initializer') return init def default_init(scale=1.0): """The same initialization used in DDPM.""" scale = 1e-10 if scale == 0 else scale return variance_scaling(scale, 'fan_avg', 'uniform') class NINNew(nn.Module): def __init__(self, in_dim, num_units, init_scale=0.1): super().__init__() self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True) self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True) def forward(self, input_0): primals_2 = self.W primals_3 = self.b primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ayulockin/Image-Super-Resolution-via-Iterative-Refinement
NIN
false
14939
[ "Apache-2.0" ]
1764
8a75df33d9ed1a2cc0da22f36f576abfc9482913
https://github.com/ayulockin/Image-Super-Resolution-via-Iterative-Refinement/tree/8a75df33d9ed1a2cc0da22f36f576abfc9482913
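# Minimal usage sketch for the NIN (1x1 "network-in-network" projection) defined
# above, assuming the NIN class is in scope; the Triton call() path of NINNew
# needs CUDA, so the eager module is used.
import torch

nin = NIN(in_dim=4, num_units=4)           # arguments from get_init_inputs()
x = torch.rand(4, 4, 4, 4)                 # (B, C, H, W) as in get_inputs()
y = nin(x)
print(y.shape)                             # torch.Size([4, 4, 4, 4])
# Per spatial position this is a channel mixing: y[b, :, h, w] = W.T @ x[b, :, h, w] + b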
CAM_Module
from torch.nn import Module import torch import torch.nn as nn from torch.nn import Parameter from torch.nn import Softmax class C(nn.Module): """ This class is for a convolutional layer. """ def __init__(self, nIn, nOut, kSize, stride=1): """ :param nIn: number of input channels :param nOut: number of output channels :param kSize: kernel size :param stride: optional stride rate for down-sampling """ super().__init__() padding = int((kSize - 1) / 2) self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False) def forward(self, input): """ :param input: input feature map :return: transformed feature map """ output = self.conv(input) return output class CAM_Module(Module): """ Channel attention module""" def __init__(self, in_dim): super(CAM_Module, self).__init__() self.chanel_in = in_dim self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self, x): """ inputs : x : input feature maps( B X C X H X W) returns : out : attention value + input feature attention: B X C X C """ m_batchsize, C, height, width = x.size() proj_query = x.view(m_batchsize, C, -1) proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy ) - energy attention = self.softmax(energy_new) proj_value = x.view(m_batchsize, C, -1) out = torch.bmm(attention, proj_value) out = out.view(m_batchsize, C, height, width) out = self.gamma * out + x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.nn as nn from torch.nn import Parameter from torch.nn import Softmax assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + x2, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](primals_2, buf4, primals_1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf5, buf4 class C(nn.Module): """ This class is for a convolutional layer. """ def __init__(self, nIn, nOut, kSize, stride=1): """ :param nIn: number of input channels :param nOut: number of output channels :param kSize: kernel size :param stride: optional stride rate for down-sampling """ super().__init__() padding = int((kSize - 1) / 2) self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False) def forward(self, input): """ :param input: input feature map :return: transformed feature map """ output = self.conv(input) return output class CAM_ModuleNew(Module): """ Channel attention module""" def __init__(self, in_dim): super(CAM_ModuleNew, self).__init__() self.chanel_in = in_dim self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self, input_0): primals_2 = self.gamma primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
ayushmankumar7/pytorch-lanenet
CAM_Module
false
14940
[ "MIT" ]
160
db9f116ba3f42dbfabf064e4a89ec068e9da4ee4
https://github.com/ayushmankumar7/pytorch-lanenet/tree/db9f116ba3f42dbfabf064e4a89ec068e9da4ee4
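# Minimal usage sketch for the channel-attention CAM_Module defined above,
# assuming the class is in scope. Because gamma is initialised to zero, the
# module behaves as an identity mapping right after construction.
import torch

cam = CAM_Module(in_dim=4)
x = torch.rand(4, 4, 4, 4)                 # (B, C, H, W) per get_inputs()
out = cam(x)
print(torch.allclose(out, x))              # True: out = gamma * attention_out + x with gamma = 0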
ZeroConv1d
import torch from torch import nn class ZeroConv1d(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv = nn.Conv1d(in_channel, out_channel, 1, padding=0) self.conv.weight.data.zero_() self.conv.bias.data.zero_() self.scale = nn.Parameter(torch.zeros(1, out_channel, 1)) def forward(self, x): out = self.conv(x) out = out * torch.exp(self.scale * 3) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_exp_mul_0[grid(16)](buf1, primals_2, primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf2, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf1 class ZeroConv1dNew(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv = nn.Conv1d(in_channel, out_channel, 1, padding=0) self.conv.weight.data.zero_() self.conv.bias.data.zero_() self.scale = nn.Parameter(torch.zeros(1, out_channel, 1)) def forward(self, input_0): primals_4 = self.scale primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
batikim09/FloWaveNet
ZeroConv1d
false
14,941
[ "MIT" ]
499
791f51aff530b2af4f9aa0d9fcb4af53d28a0997
https://github.com/batikim09/FloWaveNet/tree/791f51aff530b2af4f9aa0d9fcb4af53d28a0997
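# Minimal usage sketch for ZeroConv1d defined above, assuming the class is in
# scope. Both the conv weight/bias and the scale are zero-initialised, so the
# output is exactly zero at construction (exp(3 * 0) = 1 leaves the zero conv
# output unchanged) -- a common starting point for flow-style coupling layers.
import torch

zc = ZeroConv1d(in_channel=4, out_channel=4)
x = torch.rand(4, 4).unsqueeze(0)          # get_inputs() is 2-d; nn.Conv1d wants (B, C, L)
out = zc(x)
print(out.abs().max())                     # tensor(0.)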
L2N
import torch from torch import nn import torch.autograd def l2n(x, eps=1e-06): return x / (torch.norm(x, p=2, dim=1, keepdim=True) + eps).expand_as(x) class L2N(nn.Module): def __init__(self, eps=1e-06): super(L2N, self).__init__() self.eps = eps def forward(self, x): return l2n(x, eps=self.eps) def __repr__(self): return self.__class__.__name__ + '(' + 'eps=' + str(self.eps) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def l2n(x, eps=1e-06): return x / (torch.norm(x, p=2, dim=1, keepdim=True) + eps).expand_as(x) class L2NNew(nn.Module): def __init__(self, eps=1e-06): super(L2NNew, self).__init__() self.eps = eps def __repr__(self): return self.__class__.__name__ + '(' + 'eps=' + str(self.eps) + ')' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bestfitting/instance_level_recognition
L2N
false
14,942
[ "Apache-2.0" ]
103
683f021b4e65876835f028797ec28b0d1071bb45
https://github.com/bestfitting/instance_level_recognition/tree/683f021b4e65876835f028797ec28b0d1071bb45
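# Minimal usage sketch for L2N defined above, assuming l2n/L2N are in scope.
# The module rescales every sample so that its vector along dim=1 has
# (approximately) unit L2 norm.
import torch

norm = L2N()
x = torch.rand(4, 4, 4, 4)
y = norm(x)
print(y.norm(p=2, dim=1).mean())           # ~1.0 at every (batch, h, w) position
print(norm)                                # L2N(eps=1e-06), via the custom __repr__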
Conv
import torch from torch import nn class Conv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, causal=True): super(Conv, self).__init__() self.causal = causal if self.causal: self.padding = dilation * (kernel_size - 1) else: self.padding = dilation * (kernel_size - 1) // 2 self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, dilation=dilation, padding=self.padding) self.conv = nn.utils.weight_norm(self.conv) nn.init.kaiming_normal_(self.conv.weight) def forward(self, tensor): out = self.conv(tensor) if self.causal and self.padding != 0: out = out[:, :, :-self.padding] return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 12 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 12 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 12 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32) get_raw_stream(0) triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2, primals_1, buf2, 4, 12, XBLOCK=1, num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 6), (24, 6, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(96)](buf4, primals_3, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf4, (4, 4, 4), (24, 6, 1), 0 ), buf2, primals_1, primals_2, primals_4, buf1, buf2 class ConvNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, causal=True): super(ConvNew, self).__init__() self.causal = causal if self.causal: self.padding = dilation * (kernel_size - 1) else: self.padding = dilation * (kernel_size - 1) // 2 self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, dilation=dilation, padding=self.padding) self.conv = nn.utils.weight_norm(self.conv) nn.init.kaiming_normal_(self.conv.weight) def forward(self, input_0): primals_3 = self.conv.bias primals_1 = self.conv.weight_g primals_2 = self.conv.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, 
primals_4]) return output[0]
batikim09/FloWaveNet
Conv
false
14943
[ "MIT" ]
499
791f51aff530b2af4f9aa0d9fcb4af53d28a0997
https://github.com/batikim09/FloWaveNet/tree/791f51aff530b2af4f9aa0d9fcb4af53d28a0997
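# Minimal usage sketch for the causal, weight-normalised Conv defined above,
# assuming the class is in scope. With kernel_size=3, dilation=1 and causal=True
# the padding is 2 and the trailing 2 frames are trimmed, so the output length
# equals the input length.
import torch

conv = Conv(in_channels=4, out_channels=4)
x = torch.rand(4, 4, 4)                    # (B, C, T) per get_inputs()
y = conv(x)
print(y.shape)                             # torch.Size([4, 4, 4])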
BertAttention
from _paritybench_helpers import _mock_config from torch.nn import Module import math import torch import torch.nn.functional as F import torch.nn as nn class BertLayerNorm(Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() self.shape = torch.Size((hidden_size,)) self.eps = eps self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): u = x.mean(-1, keepdim=True) s = x - u s = s * s s = s.mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight * x + self.bias return x class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = torch.reshape(x, new_x_shape) return x.permute(0, 2, 1, 3) def transpose_key_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = torch.reshape(x, new_x_shape) return x.permute(0, 2, 3, 1) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = F.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = torch.reshape(context_layer, new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module import math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_div_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_add_div_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, 
out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mean_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_div_1[grid(64)](buf5, primals_8, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_add_div_2[grid(256)](buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_10 buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_mul_sub_4[grid(16)](buf12, primals_3, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_sqrt_sub_5[grid(64)](primals_11, buf12, primals_3, buf13, buf14, primals_12, buf15, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_12 return buf15, primals_3, primals_11, buf8, reinterpret_tensor(buf11, ( 16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertLayerNorm(Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() self.shape = torch.Size((hidden_size,)) self.eps = eps self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): u = x.mean(-1, 
keepdim=True) s = x - u s = s * s s = s.mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight * x + self.bias return x class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = torch.reshape(x, new_x_shape) return x.permute(0, 2, 1, 3) def transpose_key_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = torch.reshape(x, new_x_shape) return x.permute(0, 2, 3, 1) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = F.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = torch.reshape(context_layer, new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
axiserr/Hetu
BertAttention
false
14944
[ "Apache-2.0" ]
82
0052f727488db0570d6b37f63549b43b0920bc29
https://github.com/axiserr/Hetu/tree/0052f727488db0570d6b37f63549b43b0920bc29
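# Minimal usage sketch for the composite BertAttention block defined above
# (self-attention + BertSelfOutput), assuming the classes are in scope;
# BertAttentionNew's call() graph needs CUDA, so only the eager module is run.
import types
import torch

config = types.SimpleNamespace(hidden_size=4, num_attention_heads=4,
                               attention_probs_dropout_prob=0.5,
                               hidden_dropout_prob=0.5)
block = BertAttention(config).eval()       # eval() disables both dropout layers
input_tensor = torch.rand(4, 4, 4)         # (batch, seq_len, hidden_size)
attention_mask = torch.zeros(4, 4, 4)      # get_inputs() above uses torch.rand here
with torch.no_grad():
    out = block(input_tensor, attention_mask)
print(out.shape)                           # torch.Size([4, 4, 4])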
CRF
import torch
import torch.nn as nn


class CRF(nn.Module):
    """
    Implements Conditional Random Fields that can be trained via
    backpropagation.
    """

    def __init__(self, num_tags):
        super(CRF, self).__init__()
        self.num_tags = num_tags
        self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
        self.start_transitions = nn.Parameter(torch.randn(num_tags))
        self.stop_transitions = nn.Parameter(torch.randn(num_tags))
        nn.init.xavier_normal_(self.transitions)

    def forward(self, feats):
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(feats.shape))
        return self._viterbi(feats)

    def loss(self, feats, tags):
        """
        Computes negative log likelihood between features and tags.
        Essentially the difference between individual sequence scores and
        the sum of all possible sequence scores (partition function)

        Parameters:
            feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Should be between 0 and num_tags

        Returns:
            Negative log likelihood [a scalar]
        """
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(feats.shape))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(tags.shape))
        if feats.shape[:2] != tags.shape:
            raise ValueError('First two dimensions of feats and tags must match ',
                feats.shape, tags.shape)
        sequence_score = self._sequence_score(feats, tags)
        partition_function = self._partition_function(feats)
        log_probability = sequence_score - partition_function
        return -log_probability.mean()

    def _sequence_score(self, feats, tags):
        """
        Parameters:
            feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Should be between 0 and num_tags

        Returns: Sequence score of shape [batch size]
        """
        feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1)
        tags_pairs = tags.unfold(1, 2, 1)
        indices = tags_pairs.permute(2, 0, 1).chunk(2)
        trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
        start_score = self.start_transitions[tags[:, 0]]
        stop_score = self.stop_transitions[tags[:, -1]]
        return feat_score + start_score + trans_score + stop_score

    def _partition_function(self, feats):
        """
        Computes the partition function for CRF using the forward algorithm.
        Basically calculates scores for all possible tag sequences for
        the given feature vector sequence

        Parameters:
            feats: Input features [batch size, sequence length, number of tags]

        Returns:
            Total scores of shape [batch size]
        """
        _, seq_size, num_tags = feats.shape
        if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(self.num_tags, num_tags))
        a = feats[:, 0] + self.start_transitions.unsqueeze(0)
        transitions = self.transitions.unsqueeze(0)
        for i in range(1, seq_size):
            feat = feats[:, i].unsqueeze(1)
            a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
        return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)

    def _viterbi(self, feats):
        """
        Uses Viterbi algorithm to predict the best sequence

        Parameters:
            feats: Input features [batch size, sequence length, number of tags]

        Returns: Best tag sequence [batch size, sequence length]
        """
        _, seq_size, num_tags = feats.shape
        if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(self.num_tags, num_tags))
        v = feats[:, 0] + self.start_transitions.unsqueeze(0)
        transitions = self.transitions.unsqueeze(0)
        paths = []
        for i in range(1, seq_size):
            feat = feats[:, i]
            v, idx = (v.unsqueeze(-1) + transitions).max(1)
            paths.append(idx)
            v = v + feat
        v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
        tags = [tag]
        for idx in reversed(paths):
            tag = idx.gather(1, tag)
            tags.append(tag)
        tags.reverse()
        return torch.cat(tags, 1)

    def _log_sum_exp(self, logits, dim):
        """
        Computes log-sum-exp in a stable way
        """
        max_val, _ = logits.max(dim)
        return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_tags': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = triton_helpers.maximum(tmp5, tmp11) tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = triton_helpers.maximum(tmp19, tmp25) tmp27 = tmp5 > tmp11 tmp28 = tmp5 == tmp11 tmp29 = tmp5 != tmp5 tmp30 = tmp11 != tmp11 tmp31 = tmp29 > tmp30 tmp32 = tmp27 | tmp31 tmp33 = tmp29 & tmp30 tmp34 = tmp28 | tmp33 tmp35 = tl.full([1], 0, tl.int64) tmp36 = tl.full([1], 1, tl.int64) tmp37 = tmp35 < tmp36 tmp38 = tmp34 & tmp37 tmp39 = tmp32 | tmp38 tmp40 = tl.where(tmp39, tmp5, tmp11) tmp41 = tl.where(tmp39, tmp35, tmp36) tmp42 = tmp40 > tmp18 tmp43 = tmp40 == tmp18 tmp44 = tmp40 != tmp40 tmp45 = tmp18 != tmp18 tmp46 = tmp44 > tmp45 tmp47 = tmp42 | tmp46 tmp48 = tmp44 & tmp45 tmp49 = tmp43 | tmp48 tmp50 = tl.full([1], 2, tl.int64) tmp51 = tmp41 < tmp50 tmp52 = tmp49 & tmp51 tmp53 = tmp47 | tmp52 tmp54 = tl.where(tmp53, tmp40, tmp18) tmp55 = tl.where(tmp53, tmp41, tmp50) tmp56 = tmp54 > tmp25 tmp57 = tmp54 == tmp25 tmp58 = tmp54 != tmp54 tmp59 = tmp25 != tmp25 tmp60 = tmp58 > tmp59 tmp61 = tmp56 | tmp60 tmp62 = tmp58 & tmp59 tmp63 = tmp57 | tmp62 tmp64 = tl.full([1], 3, tl.int64) tmp65 = tmp55 < tmp64 tmp66 = tmp63 & tmp65 tmp67 = tmp61 | tmp66 tl.where(tmp67, tmp54, tmp25) tmp69 = tl.where(tmp67, tmp55, tmp64) tl.store(out_ptr0 + x2, tmp26, xmask) tl.store(out_ptr1 + x2, tmp69, xmask) @triton.jit def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') 
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 > tmp9 tmp24 = tmp4 == tmp9 tmp25 = tmp4 != tmp4 tmp26 = tmp9 != tmp9 tmp27 = tmp25 > tmp26 tmp28 = tmp23 | tmp27 tmp29 = tmp25 & tmp26 tmp30 = tmp24 | tmp29 tmp31 = tl.full([1], 0, tl.int64) tmp32 = tl.full([1], 1, tl.int64) tmp33 = tmp31 < tmp32 tmp34 = tmp30 & tmp33 tmp35 = tmp28 | tmp34 tmp36 = tl.where(tmp35, tmp4, tmp9) tmp37 = tl.where(tmp35, tmp31, tmp32) tmp38 = tmp36 > tmp15 tmp39 = tmp36 == tmp15 tmp40 = tmp36 != tmp36 tmp41 = tmp15 != tmp15 tmp42 = tmp40 > tmp41 tmp43 = tmp38 | tmp42 tmp44 = tmp40 & tmp41 tmp45 = tmp39 | tmp44 tmp46 = tl.full([1], 2, tl.int64) tmp47 = tmp37 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tmp43 | tmp48 tmp50 = tl.where(tmp49, tmp36, tmp15) tmp51 = tl.where(tmp49, tmp37, tmp46) tmp52 = tmp50 > tmp21 tmp53 = tmp50 == tmp21 tmp54 = tmp50 != tmp50 tmp55 = tmp21 != tmp21 tmp56 = tmp54 > tmp55 tmp57 = tmp52 | tmp56 tmp58 = tmp54 & tmp55 tmp59 = tmp53 | tmp58 tmp60 = tl.full([1], 3, tl.int64) tmp61 = tmp51 < tmp60 tmp62 = tmp59 & tmp61 tmp63 = tmp57 | tmp62 tl.where(tmp63, tmp50, tmp21) tmp65 = tl.where(tmp63, tmp51, tmp60) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp65, xmask) @triton.jit def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 
tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 > tmp9 tmp24 = tmp4 == tmp9 tmp25 = tmp4 != tmp4 tmp26 = tmp9 != tmp9 tmp27 = tmp25 > tmp26 tmp28 = tmp23 | tmp27 tmp29 = tmp25 & tmp26 tmp30 = tmp24 | tmp29 tmp31 = tl.full([1], 0, tl.int64) tmp32 = tl.full([1], 1, tl.int64) tmp33 = tmp31 < tmp32 tmp34 = tmp30 & tmp33 tmp35 = tmp28 | tmp34 tmp36 = tl.where(tmp35, tmp4, tmp9) tmp37 = tl.where(tmp35, tmp31, tmp32) tmp38 = tmp36 > tmp15 tmp39 = tmp36 == tmp15 tmp40 = tmp36 != tmp36 tmp41 = tmp15 != tmp15 tmp42 = tmp40 > tmp41 tmp43 = tmp38 | tmp42 tmp44 = tmp40 & tmp41 tmp45 = tmp39 | tmp44 tmp46 = tl.full([1], 2, tl.int64) tmp47 = tmp37 < tmp46 tmp48 = tmp45 & tmp47 tmp49 = tmp43 | tmp48 tmp50 = tl.where(tmp49, tmp36, tmp15) tmp51 = tl.where(tmp49, tmp37, tmp46) tmp52 = tmp50 > tmp21 tmp53 = tmp50 == tmp21 tmp54 = tmp50 != tmp50 tmp55 = tmp21 != tmp21 tmp56 = tmp54 > tmp55 tmp57 = tmp52 | tmp56 tmp58 = tmp54 & tmp55 tmp59 = tmp53 | tmp58 tmp60 = tl.full([1], 3, tl.int64) tmp61 = tmp51 < tmp60 tmp62 = tmp59 & tmp61 tmp63 = tmp57 | tmp62 tl.where(tmp63, tmp50, tmp21) tmp65 = tl.where(tmp63, tmp51, tmp60) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp65, xmask) @triton.jit def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + 1) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr2 + 2) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp47 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp48 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr2 + 3) tmp51 = tl.broadcast_to(tmp50, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 + tmp4 tmp8 = tmp6 + tmp7 tmp11 = tmp8 + tmp10 tmp12 = tmp5 > tmp11 tmp13 = tmp5 == tmp11 tmp14 = tmp5 != tmp5 tmp15 = tmp11 != tmp11 tmp16 = tmp14 > tmp15 tmp17 = tmp12 | tmp16 tmp18 = tmp14 & tmp15 tmp19 = tmp13 | tmp18 tmp20 = tl.full([1], 0, tl.int64) tmp21 = tl.full([1], 1, tl.int64) tmp22 = tmp20 < tmp21 tmp23 = tmp19 & tmp22 tmp24 = tmp17 | tmp23 tmp25 = tl.where(tmp24, tmp5, tmp11) tmp26 = tl.where(tmp24, tmp20, tmp21) tmp29 = tmp27 + tmp28 tmp32 = tmp29 + tmp31 tmp33 = tmp25 > tmp32 tmp34 = tmp25 == tmp32 tmp35 = tmp25 != tmp25 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 2, tl.int64) tmp42 = tmp26 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp25, tmp32) tmp46 = tl.where(tmp44, tmp26, tmp41) tmp49 = tmp47 + tmp48 tmp52 = tmp49 + tmp51 tmp53 = tmp45 > tmp52 tmp54 = tmp45 == tmp52 tmp55 = tmp45 != tmp45 tmp56 = tmp52 != tmp52 tmp57 = tmp55 > tmp56 tmp58 = tmp53 | tmp57 tmp59 = tmp55 & tmp56 tmp60 = tmp54 | tmp59 tmp61 = tl.full([1], 3, tl.int64) tmp62 = tmp46 < 
tmp61 tmp63 = tmp60 & tmp62 tmp64 = tmp58 | tmp63 tl.where(tmp64, tmp45, tmp52) tmp66 = tl.where(tmp64, tmp46, tmp61) tmp67 = tl.full([XBLOCK], 4, tl.int32) tmp68 = tmp66 + tmp67 tmp69 = tmp66 < 0 tmp70 = tl.where(tmp69, tmp68, tmp66) tl.device_assert((0 <= tmp70) & (tmp70 < 4) | ~xmask, 'index out of bounds: 0 <= tmp70 < 4') tmp72 = tl.load(in_ptr3 + (tmp70 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp73 = tmp72 + tmp67 tmp74 = tmp72 < 0 tmp75 = tl.where(tmp74, tmp73, tmp72) tl.device_assert((0 <= tmp75) & (tmp75 < 4) | ~xmask, 'index out of bounds: 0 <= tmp75 < 4') tmp77 = tl.load(in_ptr4 + (tmp75 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp78 = tmp77 + tmp67 tmp79 = tmp77 < 0 tmp80 = tl.where(tmp79, tmp78, tmp77) tl.device_assert((0 <= tmp80) & (tmp80 < 4) | ~xmask, 'index out of bounds: 0 <= tmp80 < 4') tmp82 = tl.load(in_ptr5 + (tmp80 + 4 * x0), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + 4 * x0, tmp66, xmask) tl.store(out_ptr1 + 4 * x0, tmp82, xmask) tl.store(out_ptr2 + 4 * x0, tmp77, xmask) tl.store(out_ptr3 + 4 * x0, tmp72, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_add_max_0[grid(16)](arg0_1, arg1_1, arg2_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64) triton_poi_fused_add_max_1[grid(16)](buf0, arg0_1, arg2_1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf0 del buf0 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64) triton_poi_fused_add_max_2[grid(16)](buf2, arg0_1, arg2_1, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg2_1 del buf2 buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64) buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3) buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0) buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1) buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2) triton_poi_fused_add_gather_max_3[grid(4)](buf4, arg0_1, arg3_1, buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 del arg3_1 del buf1 del buf3 del buf4 del buf5 return buf10, class CRFNew(nn.Module): """ Implements Conditional Random Fields that can be trained via backpropagation. """ def __init__(self, num_tags): super(CRFNew, self).__init__() self.num_tags = num_tags self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags)) self.start_transitions = nn.Parameter(torch.randn(num_tags)) self.stop_transitions = nn.Parameter(torch.randn(num_tags)) nn.init.xavier_normal_(self.transitions) def loss(self, feats, tags): """ Computes negative log likelihood between features and tags. Essentially difference between individual sequence scores and sum of all possible sequence scores (partition function) Parameters: feats: Input features [batch size, sequence length, number of tags] tags: Target tag indices [batch size, sequence length]. 
Should be between 0 and num_tags Returns: Negative log likelihood [a scalar] """ if len(feats.shape) != 3: raise ValueError('feats must be 3-d got {}-d'.format(feats.shape)) if len(tags.shape) != 2: raise ValueError('tags must be 2-d but got {}-d'.format(tags.shape) ) if feats.shape[:2] != tags.shape: raise ValueError( 'First two dimensions of feats and tags must match ', feats .shape, tags.shape) sequence_score = self._sequence_score(feats, tags) partition_function = self._partition_function(feats) log_probability = sequence_score - partition_function return -log_probability.mean() def _sequence_score(self, feats, tags): """ Parameters: feats: Input features [batch size, sequence length, number of tags] tags: Target tag indices [batch size, sequence length]. Should be between 0 and num_tags Returns: Sequence score of shape [batch size] """ feats.shape[0] feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1 ) tags_pairs = tags.unfold(1, 2, 1) indices = tags_pairs.permute(2, 0, 1).chunk(2) trans_score = self.transitions[indices].squeeze(0).sum(dim=-1) start_score = self.start_transitions[tags[:, 0]] stop_score = self.stop_transitions[tags[:, -1]] return feat_score + start_score + trans_score + stop_score def _partition_function(self, feats): """ Computes the partitition function for CRF using the forward algorithm. Basically calculate scores for all possible tag sequences for the given feature vector sequence Parameters: feats: Input features [batch size, sequence length, number of tags] Returns: Total scores of shape [batch size] """ _, seq_size, num_tags = feats.shape if self.num_tags != num_tags: raise ValueError('num_tags should be {} but got {}'.format(self .num_tags, num_tags)) a = feats[:, 0] + self.start_transitions.unsqueeze(0) transitions = self.transitions.unsqueeze(0) for i in range(1, seq_size): feat = feats[:, i].unsqueeze(1) a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1) return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1) def _viterbi(self, feats): """ Uses Viterbi algorithm to predict the best sequence Parameters: feats: Input features [batch size, sequence length, number of tags] Returns: Best tag sequence [batch size, sequence length] """ _, seq_size, num_tags = feats.shape if self.num_tags != num_tags: raise ValueError('num_tags should be {} but got {}'.format(self .num_tags, num_tags)) v = feats[:, 0] + self.start_transitions.unsqueeze(0) transitions = self.transitions.unsqueeze(0) paths = [] for i in range(1, seq_size): feat = feats[:, i] v, idx = (v.unsqueeze(-1) + transitions).max(1) paths.append(idx) v = v + feat v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True) tags = [tag] for idx in reversed(paths): tag = idx.gather(1, tag) tags.append(tag) tags.reverse() return torch.cat(tags, 1) def _log_sum_exp(self, logits, dim): """ Computes log-sum-exp in a stable way """ max_val, _ = logits.max(dim) return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log() def forward(self, input_0): arg2_1 = self.transitions arg1_1 = self.start_transitions arg3_1 = self.stop_transitions arg0_1 = input_0 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
ay94/CrossNER
CRF
false
14,945
[ "MIT" ]
77
2e7ba2a7798c961e3f29fbc51252c5a8d40224bf
https://github.com/ay94/CrossNER/tree/2e7ba2a7798c961e3f29fbc51252c5a8d40224bf
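A minimal smoke test for the CRF record (an illustrative sketch, not from the repo): CRFNew.forward runs the fused Viterbi kernels, so a CUDA device and the exact get_inputs() shape are assumed.

import torch

crf = CRFNew(num_tags=4).cuda()              # classes above assumed in scope
feats = torch.rand(4, 4, 4, device='cuda')   # [batch, seq_len, num_tags], matches get_inputs()
best = crf(feats)                            # Viterbi decode via the triton_poi_fused_add_max_* kernels
print(best.shape, best.dtype)                # torch.Size([4, 4]) torch.int64
nll = crf.loss(feats, best)                  # the eager loss path is unchanged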
MuSigmaEncoder
import torch from torch import nn class MuSigmaEncoder(nn.Module): """ Maps a representation r to mu and sigma which will define the normal distribution from which we sample the latent variable z. Parameters ---------- r_dim : int Dimension of output representation r. z_dim : int Dimension of latent variable z. """ def __init__(self, r_dim, z_dim): super(MuSigmaEncoder, self).__init__() self.r_dim = r_dim self.z_dim = z_dim self.r_to_hidden = nn.Linear(r_dim, r_dim) self.hidden_to_mu = nn.Linear(r_dim, z_dim) self.hidden_to_sigma = nn.Linear(r_dim, z_dim) def forward(self, r): """ r : torch.Tensor Shape (batch_size, r_dim) """ hidden = torch.relu(self.r_to_hidden(r)) mu = self.hidden_to_mu(hidden) sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(hidden)) return mu, sigma def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'r_dim': 4, 'z_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 0.9 tmp3 = tmp1 * tmp2 tmp4 = 0.1 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf3, primals_6, primals_4, buf5 class MuSigmaEncoderNew(nn.Module): """ Maps a representation r to mu and sigma which will define the normal distribution from which we sample the latent variable z. Parameters ---------- r_dim : int Dimension of output representation r. 
z_dim : int Dimension of latent variable z. """ def __init__(self, r_dim, z_dim): super(MuSigmaEncoderNew, self).__init__() self.r_dim = r_dim self.z_dim = z_dim self.r_to_hidden = nn.Linear(r_dim, r_dim) self.hidden_to_mu = nn.Linear(r_dim, z_dim) self.hidden_to_sigma = nn.Linear(r_dim, z_dim) def forward(self, input_0): primals_1 = self.r_to_hidden.weight primals_2 = self.r_to_hidden.bias primals_4 = self.hidden_to_mu.weight primals_5 = self.hidden_to_mu.bias primals_6 = self.hidden_to_sigma.weight primals_7 = self.hidden_to_sigma.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
benjaminalt/neural-processes
MuSigmaEncoder
false
14,946
[ "MIT" ]
170
03d4f921fe0598c77787eecc53cbed23e326a5f5
https://github.com/benjaminalt/neural-processes/tree/03d4f921fe0598c77787eecc53cbed23e326a5f5
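A minimal usage sketch for the MuSigmaEncoder record (illustrative, not from the repo); it assumes a CUDA device since call() launches Triton kernels and asserts the get_inputs() shape.

import torch

enc = MuSigmaEncoderNew(r_dim=4, z_dim=4).cuda()
r = torch.rand(4, 4, 4, 4, device='cuda')    # matches get_inputs()
mu, sigma = enc(r)                           # forward returns (output[0], output[1])
assert sigma.min() > 0.1 and sigma.max() < 1.0  # sigma = 0.1 + 0.9 * sigmoid(...)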
SigmoidFocalLoss
import torch import torch.nn as nn class SigmoidFocalLoss(nn.Module): def __init__(self, gamma, alpha): super().__init__() self.gamma = gamma self.alpha = alpha def forward(self, out, target): n_class = out.shape[1] class_ids = torch.arange(1, n_class + 1, dtype=target.dtype, device =target.device).unsqueeze(0) t = target.unsqueeze(1) p = torch.sigmoid(out) gamma = self.gamma alpha = self.alpha term1 = (1 - p) ** gamma * torch.log(p) term2 = p ** gamma * torch.log(1 - p) loss = -(t == class_ids).float() * alpha * term1 - ((t != class_ids ) * (t >= 0)).float() * (1 - alpha) * term2 return loss.sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'gamma': 4, 'alpha': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0( in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex // 256 r5 = rindex % 64 r0 = rindex % 4 r7 = rindex % 256 tmp0 = tl.load(in_ptr0 + (r5 + 64 * r3), None, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr1 + r7, None, eviction_policy='evict_last') tmp1 = 1 + r0 tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 == tmp2 tmp4 = tmp3.to(tl.float32) tmp5 = -tmp4 tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp9 = tl.sigmoid(tmp8) tmp10 = 1.0 tmp11 = tmp10 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl_math.log(tmp9) tmp15 = tmp13 * tmp14 tmp16 = tmp7 * tmp15 tmp17 = tmp0 != tmp2 tmp18 = 0.0 tmp19 = tmp0 >= tmp18 tmp20 = tmp17 & tmp19 tmp21 = tmp20.to(tl.float32) tmp22 = -3.0 tmp23 = tmp21 * tmp22 tmp24 = tmp9 * tmp9 tmp25 = tmp24 * tmp24 tmp26 = tl_math.log(tmp11) tmp27 = tmp25 * tmp26 tmp28 = tmp23 * tmp27 tmp29 = tmp16 - tmp28 tmp30 = tl.broadcast_to(tmp29, [RBLOCK]) tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0)) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp32, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_eq_ge_log_mul_ne_neg_pow_rsub_sigmoid_sub_sum_0[ grid(1)](arg1_1, arg0_1, buf1, 1, 1024, num_warps=8, num_stages=1) del arg0_1 del arg1_1 return buf1, class SigmoidFocalLossNew(nn.Module): def __init__(self, gamma, alpha): super().__init__() self.gamma = gamma self.alpha = alpha def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
berserkrambo/fcos-pytorch
SigmoidFocalLoss
false
14,947
[ "MIT" ]
63
a064eccf6d45fc85da401151dcefe7a3b01a065b
https://github.com/berserkrambo/fcos-pytorch/tree/a064eccf6d45fc85da401151dcefe7a3b01a065b
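A smoke-test sketch for the SigmoidFocalLoss record (illustrative, not from the repo). Note that the fused kernel bakes the init values in as constants (4.0 for alpha, -3.0 for 1 - alpha, and gamma=4 as repeated squaring), so the compiled path is only valid for gamma=4, alpha=4.

import torch

loss_fn = SigmoidFocalLossNew(gamma=4, alpha=4).cuda()  # kernel hard-codes these constants
out = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
loss = loss_fn(out, target)                             # scalar sum over all elements
print(loss.item())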
RNNAgent
from _paritybench_helpers import _mock_config import torch import torch.nn.functional as F import torch.nn as nn class RNNAgent(nn.Module): def __init__(self, input_shape, args): super(RNNAgent, self).__init__() self.args = args self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim) self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim) self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions) def init_hidden(self): return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_() def forward(self, inputs, hidden_state=None): b, a, e = inputs.size() x = F.relu(self.fc1(inputs.view(-1, e)), inplace=True) if hidden_state is not None: hidden_state = hidden_state.reshape(-1, self.args.rnn_hidden_dim) h = self.rnn(x, hidden_state) q = self.fc2(h) return q.view(b, a, -1), h.view(b, a, -1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': 4, 'args': _mock_config(rnn_hidden_dim=4, n_actions=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_zeros_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, 4), (4, 1)) assert_size_stride(primals_6, (12,), (1,)) assert_size_stride(primals_7, (12,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_zeros_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf4) del primals_5 buf5 = torch.ops.aten._thnn_fused_gru_cell.default(buf3, buf4, buf2, primals_6, primals_7) del buf3 del buf4 del primals_6 del primals_7 buf6 = buf5[0] buf7 = buf5[1] del buf5 buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf6, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_9 return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf1, buf2, buf6, buf7, primals_8, primals_4 class RNNAgentNew(nn.Module): def __init__(self, input_shape, args): super(RNNAgentNew, self).__init__() self.args = args self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim) self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim) self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions) def init_hidden(self): return 
self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.rnn.weight_ih primals_5 = self.rnn.weight_hh primals_6 = self.rnn.bias_ih primals_7 = self.rnn.bias_hh primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
benellis3/pymarl2
RNNAgent
false
14,948
[ "Apache-2.0" ]
401
0875995a0e0b9692ea64484478b369c7f6c0cf44
https://github.com/benellis3/pymarl2/tree/0875995a0e0b9692ea64484478b369c7f6c0cf44
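A minimal usage sketch for the RNNAgent record (illustrative, not from the repo): a SimpleNamespace stands in for the _mock_config args object, and a CUDA device is assumed for the compiled path.

from types import SimpleNamespace
import torch

args = SimpleNamespace(rnn_hidden_dim=4, n_actions=4)   # stand-in for _mock_config
agent = RNNAgentNew(input_shape=4, args=args).cuda()
obs = torch.rand(4, 4, 4, device='cuda')                # [batch, agents, features]
q, h = agent(obs)                                       # hidden state is zero-initialised inside call()
print(q.shape, h.shape)                                 # torch.Size([4, 4, 4]) for both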
Masked_MSE_Loss
import torch import torch.nn as nn def check_loss_input(im0, im1, w): """ im0 is out and im1 is target and w is mask""" assert list(im0.size())[2:] == list(im1.size())[2:], 'spatial dim mismatch' if w is not None: assert list(im0.size())[2:] == list(w.size())[2: ], 'spatial dim mismatch' if im1.size(0) != 1: assert im0.size(0) == im1.size(0) if w is not None and w.size(0) != 1: assert im0.size(0) == w.size(0) return class Masked_MSE_Loss(nn.Module): def __init__(self): super(Masked_MSE_Loss, self).__init__() self.loss = nn.MSELoss(reduction='none') def forward(self, pred, ref, w=None): """ ims have dimension BCHW while mask is B1HW """ check_loss_input(pred, ref, w) loss = self.loss(pred, ref) assert pred.shape[1] == ref.shape[1] channels = pred.shape[1] if w is not None: w = w.repeat(1, channels, 1, 1) n = torch.sum(loss * w, [1, 2, 3]) d = torch.sum(w, [1, 2, 3]) loss = n / d return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mse_loss_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mse_loss_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, def check_loss_input(im0, im1, w): """ im0 is out and im1 is target and w is mask""" assert list(im0.size())[2:] == list(im1.size())[2:], 'spatial dim mismatch' if w is not None: assert list(im0.size())[2:] == list(w.size())[2: ], 'spatial dim mismatch' if im1.size(0) != 1: assert im0.size(0) == im1.size(0) if w is not None and w.size(0) != 1: assert im0.size(0) == w.size(0) return class Masked_MSE_LossNew(nn.Module): def __init__(self): super(Masked_MSE_LossNew, self).__init__() self.loss = nn.MSELoss(reduction='none') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bg459/gan-ensembling-loader
Masked_MSE_Loss
false
14,949
[ "MIT" ]
86
5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8
https://github.com/bg459/gan-ensembling-loader/tree/5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8
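A smoke-test sketch for the Masked_MSE_Loss record (illustrative, not from the repo). The fused kernel covers only the w=None branch, so it returns the unreduced per-element squared error.

import torch

loss_fn = Masked_MSE_LossNew().cuda()
pred = torch.rand(4, 4, 4, 4, device='cuda')
ref = torch.rand(4, 4, 4, 4, device='cuda')
loss = loss_fn(pred, ref)            # elementwise (pred - ref) ** 2; the mask branch is not compiled
print(loss.shape)                    # torch.Size([4, 4, 4, 4])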
SelfAttention
import torch
import torch.nn.functional as F
import torch.nn as nn


class SelfAttention(nn.Module):

    def __init__(self, input_size, heads, embed_size):
        super().__init__()
        self.input_size = input_size
        self.heads = heads
        self.emb_size = embed_size
        self.tokeys = nn.Linear(self.input_size, self.emb_size * heads,
            bias=False)
        self.toqueries = nn.Linear(self.input_size, self.emb_size * heads,
            bias=False)
        self.tovalues = nn.Linear(self.input_size, self.emb_size * heads,
            bias=False)

    def forward(self, x):
        b, t, hin = x.size()
        assert hin == self.input_size, f'Input size {hin} should match {self.input_size}'
        h = self.heads
        e = self.emb_size
        keys = self.tokeys(x).view(b, t, h, e)
        queries = self.toqueries(x).view(b, t, h, e)
        values = self.tovalues(x).view(b, t, h, e)
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
        values = values.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries / e ** (1 / 4)
        keys = keys / e ** (1 / 4)
        dot = torch.bmm(queries, keys.transpose(1, 2))
        assert dot.size() == (b * h, t, t)
        dot = F.softmax(dot, dim=2)
        out = torch.bmm(dot, values).view(b, h, t, e)
        out = out.transpose(1, 2).contiguous().view(b, t, h * e)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'heads': 4, 'embed_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 4) + 16 * x2 + 64 * (x1 // 4)), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x2 % 4) + 16 * x1 + 64 * (x2 // 4)), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4, 4), (4, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_div_1[grid(256)](buf0, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0) del buf0 extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0) del buf9 triton_poi_fused_transpose_5[grid(256)](buf3, buf11, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf3 return reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), buf11, buf4 class SelfAttentionNew(nn.Module): def __init__(self, input_size, heads, embed_size): super().__init__() self.input_size = input_size self.heads = heads self.emb_size = embed_size self.tokeys = nn.Linear(self.input_size, self.emb_size * heads, bias=False) self.toqueries = nn.Linear(self.input_size, self.emb_size * heads, bias=False) self.tovalues = nn.Linear(self.input_size, self.emb_size * heads, bias=False) def forward(self, input_0): primals_2 = self.tokeys.weight primals_3 = self.toqueries.weight primals_4 = self.tovalues.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
benellis3/pymarl2
SelfAttention
false
14,950
[ "Apache-2.0" ]
401
0875995a0e0b9692ea64484478b369c7f6c0cf44
https://github.com/benellis3/pymarl2/tree/0875995a0e0b9692ea64484478b369c7f6c0cf44
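A minimal usage sketch for the SelfAttention record (illustrative, not from the repo); a CUDA device and the get_inputs() shape are assumed.

import torch

sa = SelfAttentionNew(input_size=4, heads=4, embed_size=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')      # [batch, seq_len, input_size]
out = sa(x)
print(out.shape)                            # torch.Size([4, 4, 16]); heads * embed_size = 16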
HeatedUpScalar
import torch import torch.nn as nn class HeatedUpScalar(nn.Module): def __init__(self, first_value, last_value, nb_steps, scope='task', ** kwargs): super().__init__() self.scope = scope self.first_value = first_value self.step = (max(first_value, last_value) - min(first_value, last_value)) / (nb_steps - 1) if first_value > last_value: self._factor = -1 else: self._factor = 1 self._increment = 0 None def on_task_end(self): if self.scope == 'task': self._increment += 1 None def on_epoch_end(self): if self.scope == 'epoch': self._increment += 1 @property def factor(self): return self.first_value + self._factor * self._increment * self.step def forward(self, inputs): return self.factor * inputs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'first_value': 4, 'last_value': 4, 'nb_steps': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class HeatedUpScalarNew(nn.Module): def __init__(self, first_value, last_value, nb_steps, scope='task', ** kwargs): super().__init__() self.scope = scope self.first_value = first_value self.step = (max(first_value, last_value) - min(first_value, last_value)) / (nb_steps - 1) if first_value > last_value: self._factor = -1 else: self._factor = 1 self._increment = 0 None def on_task_end(self): if self.scope == 'task': self._increment += 1 None def on_epoch_end(self): if self.scope == 'epoch': self._increment += 1 @property def factor(self): return self.first_value + self._factor * self._increment * self.step def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
billpsomas/incremental_learning.pytorch
HeatedUpScalar
false
14,951
[ "MIT" ]
277
a401a6609fc61c74698739cf937c0ece1c10913f
https://github.com/billpsomas/incremental_learning.pytorch/tree/a401a6609fc61c74698739cf937c0ece1c10913f
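A smoke-test sketch for the HeatedUpScalar record (illustrative, not from the repo). With first_value == last_value the step is 0, so the factor stays 4, and the fused kernel bakes that 4.0 in as a constant; other factor values would require recompilation.

import torch

scaler = HeatedUpScalarNew(first_value=4, last_value=4, nb_steps=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = scaler(x)                                # kernel multiplies by the baked-in factor 4.0
assert torch.allclose(y, 4.0 * x)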
NacCell
import torch from torch import Tensor from torch import nn from torch.nn.parameter import Parameter from torch.nn.init import xavier_uniform_ from torch.nn.functional import linear from torch import sigmoid from torch import tanh class NacCell(nn.Module): """Basic NAC unit implementation from https://arxiv.org/pdf/1808.00508.pdf """ def __init__(self, in_shape, out_shape): """ in_shape: input sample dimension out_shape: output sample dimension """ super().__init__() self.in_shape = in_shape self.out_shape = out_shape self.W_ = Parameter(Tensor(out_shape, in_shape)) self.M_ = Parameter(Tensor(out_shape, in_shape)) xavier_uniform_(self.W_), xavier_uniform_(self.M_) self.register_parameter('bias', None) def forward(self, input): W = tanh(self.W_) * sigmoid(self.M_) return linear(input, W, self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_shape': 4, 'out_shape': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import Tensor from torch import nn from torch.nn.parameter import Parameter from torch.nn.init import xavier_uniform_ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0[grid(16)](primals_1, primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class NacCellNew(nn.Module): """Basic NAC unit implementation from https://arxiv.org/pdf/1808.00508.pdf """ def __init__(self, in_shape, out_shape): """ in_shape: input sample dimension out_shape: output sample dimension """ super().__init__() self.in_shape = in_shape self.out_shape = out_shape self.W_ = Parameter(Tensor(out_shape, in_shape)) self.M_ = Parameter(Tensor(out_shape, in_shape)) xavier_uniform_(self.W_), xavier_uniform_(self.M_) self.register_parameter('bias', None) def forward(self, input_0): primals_1 = self.W_ primals_2 = self.M_ primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
bharathgs/NALU
NacCell
false
14,952
[ "MIT" ]
118
5d52cc270786563b67837a3856841baafba20e60
https://github.com/bharathgs/NALU/tree/5d52cc270786563b67837a3856841baafba20e60
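A minimal usage sketch for the NacCell record (illustrative, not from the repo); it assumes a CUDA device for the fused tanh(W_) * sigmoid(M_) kernel.

import torch

nac = NacCellNew(in_shape=4, out_shape=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')    # matches get_inputs()
y = nac(x)                                   # linear(x, tanh(W_) * sigmoid(M_)) with no bias
print(y.shape)                               # torch.Size([4, 4, 4, 4])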
Masked_L1_Loss
import torch import torch.nn as nn def check_loss_input(im0, im1, w): """ im0 is out and im1 is target and w is mask""" assert list(im0.size())[2:] == list(im1.size())[2:], 'spatial dim mismatch' if w is not None: assert list(im0.size())[2:] == list(w.size())[2: ], 'spatial dim mismatch' if im1.size(0) != 1: assert im0.size(0) == im1.size(0) if w is not None and w.size(0) != 1: assert im0.size(0) == w.size(0) return class Masked_L1_Loss(nn.Module): def __init__(self): super(Masked_L1_Loss, self).__init__() self.loss = nn.L1Loss(reduction='none') def forward(self, pred, ref, w=None): """ ims have dimension BCHW while mask is B1HW """ check_loss_input(pred, ref, w) loss = self.loss(pred, ref) assert pred.shape[1] == ref.shape[1] channels = pred.shape[1] if w is not None: w = w.repeat(1, channels, 1, 1) n = torch.sum(loss * w, [1, 2, 3]) d = torch.sum(w, [1, 2, 3]) loss = n / d return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, def check_loss_input(im0, im1, w): """ im0 is out and im1 is target and w is mask""" assert list(im0.size())[2:] == list(im1.size())[2:], 'spatial dim mismatch' if w is not None: assert list(im0.size())[2:] == list(w.size())[2: ], 'spatial dim mismatch' if im1.size(0) != 1: assert im0.size(0) == im1.size(0) if w is not None and w.size(0) != 1: assert im0.size(0) == w.size(0) return class Masked_L1_LossNew(nn.Module): def __init__(self): super(Masked_L1_LossNew, self).__init__() self.loss = nn.L1Loss(reduction='none') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bg459/gan-ensembling-loader
Masked_L1_Loss
false
14,953
[ "MIT" ]
86
5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8
https://github.com/bg459/gan-ensembling-loader/tree/5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8
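A smoke-test sketch for the Masked_L1_Loss record (illustrative, not from the repo); as with the MSE variant above, the compiled path covers only w=None and returns the unreduced loss.

import torch

loss_fn = Masked_L1_LossNew().cuda()
pred = torch.rand(4, 4, 4, 4, device='cuda')
ref = torch.rand(4, 4, 4, 4, device='cuda')
loss = loss_fn(pred, ref)            # elementwise |pred - ref|; the mask branch is not compiled
print(loss.shape)                    # torch.Size([4, 4, 4, 4])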
FixupBasicBlock
import torch from torch import nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class FixupBasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(FixupBasicBlock, self).__init__() self.bias1a = nn.Parameter(torch.zeros(1)) self.conv1 = conv3x3(inplanes, planes, stride) self.bias1b = nn.Parameter(torch.zeros(1)) self.relu = nn.ReLU(inplace=True) self.bias2a = nn.Parameter(torch.zeros(1)) self.conv2 = conv3x3(planes, planes) self.scale = nn.Parameter(torch.ones(1)) self.bias2b = nn.Parameter(torch.zeros(1)) self.downsample = downsample self.stride = stride def forward(self, x): identity = x out = self.conv1(x + self.bias1a) out = self.relu(out + self.bias1b) out = self.conv2(out + self.bias2a) out = out * self.scale + self.bias2b if self.downsample is not None: identity = self.downsample(x + self.bias1a) out += identity out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp6 = tl.load(in_ptr2 + 0)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp8 = tmp5 + tmp7
    tmp9 = 0.0
    tmp10 = tmp5 <= tmp9
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp10, xmask)


@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr3 + x0, xmask)
    tmp3 = tmp0 * tmp2
    tmp6 = tmp3 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 0, tl.int32)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp11 = 0.0
    tmp12 = tmp10 <= tmp11
    tl.store(out_ptr0 + x0, tmp10, xmask)
    tl.store(out_ptr1 + x0, tmp12, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (1,), (1,))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_7, (1,), (1,))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf1,
            primals_4, primals_5, buf2, buf6, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_4
        del primals_5
        buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf1
        del buf1
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_mul_relu_threshold_backward_2[grid(256)](buf3,
            primals_7, primals_8, primals_1, buf4, buf5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        del primals_8
    return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1, bias=False)


class FixupBasicBlockNew(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(FixupBasicBlockNew, self).__init__()
        self.bias1a = nn.Parameter(torch.zeros(1))
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bias1b = nn.Parameter(torch.zeros(1))
        self.relu = nn.ReLU(inplace=True)
        self.bias2a = nn.Parameter(torch.zeros(1))
        self.conv2 = conv3x3(planes, planes)
        self.scale = nn.Parameter(torch.ones(1))
        self.bias2b = nn.Parameter(torch.zeros(1))
        self.downsample = downsample
        self.stride = stride

    def forward(self, input_0):
        primals_2 = self.bias1a
        primals_4 = self.bias1b
        primals_5 = self.bias2a
        primals_7 = self.scale
        primals_8 = self.bias2b
        primals_3 = self.conv1.weight
        primals_6 = self.conv2.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
bethgelab/robustness
FixupBasicBlock
false
14,954
[ "Apache-2.0" ]
67
aa0a6798fe3973bae5f47561721b59b39f126ab7
https://github.com/bethgelab/robustness/tree/aa0a6798fe3973bae5f47561721b59b39f126ab7
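Note on using this record pair: the eager FixupBasicBlock and the Inductor-generated FixupBasicBlockNew expose identical parameter names, so they can be cross-checked numerically on the shapes fixed by get_inputs/get_init_inputs. The sketch below is illustrative only and not part of the record; it assumes a CUDA device and that both classes have been imported from the two code fields above.

import torch

# Hypothetical equivalence check: share weights, then compare outputs on the
# traced shape (4, 4, 4, 4) that call() pins via assert_size_stride.
ref = FixupBasicBlock(inplanes=4, planes=4).cuda().eval()
opt = FixupBasicBlockNew(inplanes=4, planes=4).cuda().eval()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), opt(x), rtol=1e-4, atol=1e-4)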
EncoderLayer
import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class FeedForward(nn.Module):

    def __init__(self, d_model, d_hidden):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_hidden)
        self.linear2 = nn.Linear(d_hidden, d_model)

    def forward(self, x):
        return self.linear2(F.relu(self.linear1(x)))


class Attention(nn.Module):

    def __init__(self, d_key, drop_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(drop_ratio)
        self.causal = causal

    def forward(self, query, key, value):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and (self is None or self.causal):
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri
            dot_products.data.sub_(tri.unsqueeze(0))
        return matmul(self.dropout(F.softmax(dot_products / self.scale,
            dim=-1)), value)


class MultiHead(nn.Module):

    def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, drop_ratio, causal=causal)
        self.wq = nn.Linear(d_key, d_key, bias=False)
        self.wk = nn.Linear(d_key, d_key, bias=False)
        self.wv = nn.Linear(d_value, d_value, bias=False)
        self.wo = nn.Linear(d_value, d_key, bias=False)
        self.n_heads = n_heads

    def forward(self, query, key, value):
        query, key, value = self.wq(query), self.wk(key), self.wv(value)
        query, key, value = (x.chunk(self.n_heads, -1) for x in (query,
            key, value))
        return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
            zip(query, key, value)], -1))


class LayerNorm(nn.Module):

    def __init__(self, d_model, eps=1e-06):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta


class ResidualBlock(nn.Module):

    def __init__(self, layer, d_model, drop_ratio):
        super().__init__()
        self.layer = layer
        self.dropout = nn.Dropout(drop_ratio)
        self.layernorm = LayerNorm(d_model)

    def forward(self, *x):
        return self.layernorm(x[0] + self.dropout(self.layer(*x)))


class EncoderLayer(nn.Module):

    def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
        super().__init__()
        self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads,
            drop_ratio), d_model, drop_ratio)
        self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
            d_model, drop_ratio)

    def forward(self, x):
        return self.feedforward(self.selfattn(x, x, x))


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_hidden': 4, 'n_heads': 4, 'drop_ratio': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy=
        'evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.5
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 3, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 4, tl.int64)
    tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)


@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = 3.0
    tmp29 = tmp27 / tmp28
    tl.store(in_out_ptr0 + x0, tmp29, xmask)
    tl.store(out_ptr0 + x0, tmp16, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr2 + x2, xmask)
    tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 - tmp4
    tmp6 = tmp0 * tmp5
    tmp8 = libdevice.sqrt(tmp7)
    tmp9 = 1e-06
    tmp10 = tmp8 + tmp9
    tmp11 = tmp6 / tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp0 * tmp11
    tmp13 = tmp2 - tmp10
    tmp14 = tmp13 * tmp13
    tmp15 = tmp3 - tmp10
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = tmp5 - tmp10
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp10
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = 3.0
    tmp25 = tmp23 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06
    tmp28 = tmp26 + tmp27
    tmp29 = tmp12 / tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        del primals_4
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
            0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf5 = buf3
        del buf3
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16,
            4, 1), 0), out=buf6)
        buf7 = buf4
        del buf4
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
            1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf9 = buf7
        del buf7
        triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16,
            4, 1), 1), out=buf10)
        buf11 = buf8
        del buf8
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
            2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf13 = buf11
        del buf11
        triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
            4, 1), 2), out=buf14)
        buf15 = buf12
        del buf12
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
            3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf17 = buf15
        del buf15
        triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
            4, 1), 3), out=buf18)
        buf19 = buf16
        del buf16
        triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf10
        del buf14
        buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf20)
        buf21 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0)
        del buf6
        buf22 = buf21
        del buf21
        buf23 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0)
        del buf18
        triton_poi_fused_add_mean_std_3[grid(16)](buf22, primals_2, buf20,
            buf23, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_6,
            primals_2, buf20, buf23, buf22, primals_7, buf24, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        del buf22
        del buf23
        del primals_7
        buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf25)
        buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
        del buf25
        buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_5[grid(64)](buf26,
            primals_9, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_9
        buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf27)
        buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0)
        del buf27
        triton_poi_fused_add_6[grid(64)](buf28, buf24, primals_11, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_11
        buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_12,
            buf28, primals_13, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1
            )
        del primals_13
    return (buf29, primals_2, primals_6, primals_12, buf5, buf9, buf13,
        buf17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20,
        reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(
        buf26, (16, 4), (4, 1), 0), buf28, primals_10, buf30, primals_8,
        primals_5, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1),
        reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0))


def matmul(x, y):
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    return (x @ y.unsqueeze(-2)).squeeze(-2)


class FeedForward(nn.Module):

    def __init__(self, d_model, d_hidden):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_hidden)
        self.linear2 = nn.Linear(d_hidden, d_model)

    def forward(self, x):
        return self.linear2(F.relu(self.linear1(x)))


class Attention(nn.Module):

    def __init__(self, d_key, drop_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(drop_ratio)
        self.causal = causal

    def forward(self, query, key, value):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and (self is None or self.causal):
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri
            dot_products.data.sub_(tri.unsqueeze(0))
        return matmul(self.dropout(F.softmax(dot_products / self.scale,
            dim=-1)), value)


class MultiHead(nn.Module):

    def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, drop_ratio, causal=causal)
        self.wq = nn.Linear(d_key, d_key, bias=False)
        self.wk = nn.Linear(d_key, d_key, bias=False)
        self.wv = nn.Linear(d_value, d_value, bias=False)
        self.wo = nn.Linear(d_value, d_key, bias=False)
        self.n_heads = n_heads

    def forward(self, query, key, value):
        query, key, value = self.wq(query), self.wk(key), self.wv(value)
        query, key, value = (x.chunk(self.n_heads, -1) for x in (query,
            key, value))
        return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
            zip(query, key, value)], -1))


class LayerNorm(nn.Module):

    def __init__(self, d_model, eps=1e-06):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta


class ResidualBlock(nn.Module):

    def __init__(self, layer, d_model, drop_ratio):
        super().__init__()
        self.layer = layer
        self.dropout = nn.Dropout(drop_ratio)
        self.layernorm = LayerNorm(d_model)

    def forward(self, *x):
        return self.layernorm(x[0] + self.dropout(self.layer(*x)))


class EncoderLayerNew(nn.Module):

    def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
        super().__init__()
        self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads,
            drop_ratio), d_model, drop_ratio)
        self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
            d_model, drop_ratio)

    def forward(self, input_0):
        primals_1 = self.selfattn.layer.wq.weight
        primals_3 = self.selfattn.layer.wk.weight
        primals_4 = self.selfattn.layer.wv.weight
        primals_5 = self.selfattn.layer.wo.weight
        primals_6 = self.selfattn.layernorm.gamma
        primals_7 = self.selfattn.layernorm.beta
        primals_8 = self.feedforward.layer.linear1.weight
        primals_9 = self.feedforward.layer.linear1.bias
        primals_10 = self.feedforward.layer.linear2.weight
        primals_11 = self.feedforward.layer.linear2.bias
        primals_12 = self.feedforward.layernorm.gamma
        primals_13 = self.feedforward.layernorm.beta
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
bhubanendra-mishra/dense-video-cap
EncoderLayer
false
14,955
[ "BSD-3-Clause" ]
174
43914e17769701b9cf98eda203ae4c465b315fab
https://github.com/bhubanendra-mishra/dense-video-cap/tree/43914e17769701b9cf98eda203ae4c465b315fab
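A subtlety specific to this pair: both ResidualBlock instances wrap nn.Dropout with drop_ratio=0.5, while the generated call() contains no dropout kernel, so the eager and compiled paths only agree deterministically with dropout disabled. The eval-mode sketch below is illustrative and not part of the record; it assumes a CUDA device and that both classes have been imported from the two code fields above.

import torch

ref = EncoderLayer(d_model=4, d_hidden=4, n_heads=4, drop_ratio=0.5).cuda()
opt = EncoderLayerNew(d_model=4, d_hidden=4, n_heads=4, drop_ratio=0.5).cuda()
opt.load_state_dict(ref.state_dict())
# eval() turns dropout into the identity, making both outputs deterministic
ref.eval()
opt.eval()
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), opt(x), rtol=1e-4, atol=1e-4)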
UNet
import torch
import torch.nn as nn
import torch.nn.functional as F


def Conv(in_channels, out_channels):
    return nn.Conv2d(in_channels, out_channels, 3, padding=1)


def concat(a, b):
    return torch.cat((a, b), 1)


def pool(x):
    return F.max_pool2d(x, 2, 2)


def relu(x):
    return F.relu(x, inplace=True)


def upsample(x):
    return F.interpolate(x, scale_factor=2, mode='nearest')


class UNet(nn.Module):

    def __init__(self, in_channels=3, out_channels=3):
        super(UNet, self).__init__()
        ic = in_channels
        ec1 = 32
        ec2 = 48
        ec3 = 64
        ec4 = 80
        ec5 = 96
        dc4 = 112
        dc3 = 96
        dc2 = 64
        dc1a = 64
        dc1b = 32
        oc = out_channels
        self.enc_conv0 = Conv(ic, ec1)
        self.enc_conv1 = Conv(ec1, ec1)
        self.enc_conv2 = Conv(ec1, ec2)
        self.enc_conv3 = Conv(ec2, ec3)
        self.enc_conv4 = Conv(ec3, ec4)
        self.enc_conv5a = Conv(ec4, ec5)
        self.enc_conv5b = Conv(ec5, ec5)
        self.dec_conv4a = Conv(ec5 + ec3, dc4)
        self.dec_conv4b = Conv(dc4, dc4)
        self.dec_conv3a = Conv(dc4 + ec2, dc3)
        self.dec_conv3b = Conv(dc3, dc3)
        self.dec_conv2a = Conv(dc3 + ec1, dc2)
        self.dec_conv2b = Conv(dc2, dc2)
        self.dec_conv1a = Conv(dc2 + ic, dc1a)
        self.dec_conv1b = Conv(dc1a, dc1b)
        self.dec_conv0 = Conv(dc1b, oc)
        self.alignment = 16

    def forward(self, input):
        x = relu(self.enc_conv0(input))
        x = relu(self.enc_conv1(x))
        x = pool1 = pool(x)
        x = relu(self.enc_conv2(x))
        x = pool2 = pool(x)
        x = relu(self.enc_conv3(x))
        x = pool3 = pool(x)
        x = relu(self.enc_conv4(x))
        x = pool(x)
        x = relu(self.enc_conv5a(x))
        x = relu(self.enc_conv5b(x))
        x = upsample(x)
        x = concat(x, pool3)
        x = relu(self.dec_conv4a(x))
        x = relu(self.dec_conv4b(x))
        x = upsample(x)
        x = concat(x, pool2)
        x = relu(self.dec_conv3a(x))
        x = relu(self.dec_conv3b(x))
        x = upsample(x)
        x = concat(x, pool1)
        x = relu(self.dec_conv2a(x))
        x = relu(self.dec_conv2b(x))
        x = upsample(x)
        x = concat(x, input)
        x = relu(self.dec_conv1a(x))
        x = relu(self.dec_conv1b(x))
        x = self.dec_conv0(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 32
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 48
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 80
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 5120
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 96
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_9(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 64 % 160
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x3 = xindex // 10240
    x4 = xindex % 64
    x5 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 96, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.full([XBLOCK], 4, tl.int32)
    tmp7 = tmp5 + tmp6
    tmp8 = tmp5 < 0
    tmp9 = tl.where(tmp8, tmp7, tmp5)
    tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp11 = tmp10 + tmp6
    tmp12 = tmp10 < 0
    tmp13 = tl.where(tmp12, tmp11, tmp10)
    tmp14 = tl.load(in_ptr1 + (tmp13 + 4 * tmp9 + 16 * x2 + 1536 * x3),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp4, tmp18, tmp19)
    tmp21 = tmp0 >= tmp3
    tl.full([1], 160, tl.int64)
    tmp24 = tl.load(in_ptr3 + (x4 + 64 * (-96 + x2) + 4096 * x3), tmp21,
        other=0.0)
    tmp25 = tl.where(tmp4, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)


@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 112
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 256 % 160
    x1 = xindex // 16 % 16
    x0 = xindex % 16
    x3 = xindex // 40960
    x4 = xindex % 256
    x5 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 112, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.full([XBLOCK], 8, tl.int32)
    tmp7 = tmp5 + tmp6
    tmp8 = tmp5 < 0
    tmp9 = tl.where(tmp8, tmp7, tmp5)
    tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp11 = tmp10 + tmp6
    tmp12 = tmp10 < 0
    tmp13 = tl.where(tmp12, tmp11, tmp10)
    tmp14 = tl.load(in_ptr1 + (tmp13 + 8 * tmp9 + 64 * x2 + 7168 * x3),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp4, tmp18, tmp19)
    tmp21 = tmp0 >= tmp3
    tl.full([1], 160, tl.int64)
    tmp24 = tl.load(in_ptr3 + (x4 + 256 * (-112 + x2) + 12288 * x3), tmp21,
        other=0.0)
    tmp25 = tl.where(tmp4, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)


@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 96
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 1024 % 128
    x1 = xindex // 32 % 32
    x0 = xindex % 32
    x3 = xindex // 131072
    x4 = xindex % 1024
    x5 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 96, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.full([XBLOCK], 16, tl.int32)
    tmp7 = tmp5 + tmp6
    tmp8 = tmp5 < 0
    tmp9 = tl.where(tmp8, tmp7, tmp5)
    tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp11 = tmp10 + tmp6
    tmp12 = tmp10 < 0
    tmp13 = tl.where(tmp12, tmp11, tmp10)
    tmp14 = tl.load(in_ptr1 + (tmp13 + 16 * tmp9 + 256 * x2 + 24576 * x3),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp4, tmp18, tmp19)
    tmp21 = tmp0 >= tmp3
    tl.full([1], 128, tl.int64)
    tmp24 = tl.load(in_ptr3 + (x4 + 1024 * (-96 + x2) + 32768 * x3), tmp21,
        other=0.0)
    tmp25 = tl.where(tmp4, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)


@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused_cat_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 4096 % 67
    x1 = xindex // 64 % 64
    x0 = xindex % 64
    x3 = xindex // 274432
    x4 = xindex % 4096
    x5 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.full([XBLOCK], 32, tl.int32)
    tmp7 = tmp5 + tmp6
    tmp8 = tmp5 < 0
    tmp9 = tl.where(tmp8, tmp7, tmp5)
    tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp11 = tmp10 + tmp6
    tmp12 = tmp10 < 0
    tmp13 = tl.where(tmp12, tmp11, tmp10)
    tmp14 = tl.load(in_ptr1 + (tmp13 + 32 * tmp9 + 1024 * x2 + 65536 * x3),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last',
        other=0.0)
    tmp16 = tmp14 + tmp15
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp4, tmp18, tmp19)
    tmp21 = tmp0 >= tmp3
    tl.full([1], 67, tl.int64)
    tmp24 = tl.load(in_ptr3 + (x4 + 4096 * (-64 + x2) + 12288 * x3), tmp21,
        other=0.0)
    tmp25 = tl.where(tmp4, tmp20, tmp24)
    tl.store(out_ptr0 + x5, tmp25, None)


@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 3
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 96
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 112
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_25(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 96
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27, primals_28, primals_29, primals_30, primals_31,
        primals_32, primals_33) = args
    args.clear()
    assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (48, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_7, (48,), (1,))
    assert_size_stride(primals_8, (64, 48, 3, 3), (432, 9, 3, 1))
    assert_size_stride(primals_9, (64,), (1,))
    assert_size_stride(primals_10, (80, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_11, (80,), (1,))
    assert_size_stride(primals_12, (96, 80, 3, 3), (720, 9, 3, 1))
    assert_size_stride(primals_13, (96,), (1,))
    assert_size_stride(primals_14, (96, 96, 3, 3), (864, 9, 3, 1))
    assert_size_stride(primals_15, (96,), (1,))
    assert_size_stride(primals_16, (112, 160, 3, 3), (1440, 9, 3, 1))
    assert_size_stride(primals_17, (112,), (1,))
    assert_size_stride(primals_18, (112, 112, 3, 3), (1008, 9, 3, 1))
    assert_size_stride(primals_19, (112,), (1,))
    assert_size_stride(primals_20, (96, 160, 3, 3), (1440, 9, 3, 1))
    assert_size_stride(primals_21, (96,), (1,))
    assert_size_stride(primals_22, (96, 96, 3, 3), (864, 9, 3, 1))
    assert_size_stride(primals_23, (96,), (1,))
    assert_size_stride(primals_24, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_25, (64,), (1,))
    assert_size_stride(primals_26, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_27, (64,), (1,))
    assert_size_stride(primals_28, (64, 67, 3, 3), (603, 9, 3, 1))
    assert_size_stride(primals_29, (64,), (1,))
    assert_size_stride(primals_30, (32, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_31, (32,), (1,))
    assert_size_stride(primals_32, (3, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_33, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
            524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_0[grid(524288)](buf3, primals_5,
            524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
            torch.float32)
        buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf3,
            buf4, buf5, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 48, 32, 32), (49152, 1024, 32, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_2[grid(196608)](buf7, primals_7,
            196608, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf8 = empty_strided_cuda((4, 48, 16, 16), (12288, 256, 16, 1),
            torch.float32)
        buf9 = empty_strided_cuda((4, 48, 16, 16), (12288, 256, 16, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(49152)](buf7, buf8,
            buf9, 49152, XBLOCK=256, num_warps=4, num_stages=1)
        buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 64, 16, 16), (16384, 256, 16, 1))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_relu_4[grid(65536)](buf11, primals_9,
            65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_9
        buf12 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.
            float32)
        buf13 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf11,
            buf12, buf13, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        buf14 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 80, 8, 8), (5120, 64, 8, 1))
        buf15 = buf14
        del buf14
        triton_poi_fused_convolution_relu_6[grid(20480)](buf15, primals_11,
            20480, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
        buf16 = empty_strided_cuda((4, 80, 4, 4), (1280, 16, 4, 1), torch.
            float32)
        buf17 = empty_strided_cuda((4, 80, 4, 4), (1280, 16, 4, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_7[grid(5120)](buf15,
            buf16, buf17, 5120, XBLOCK=128, num_warps=4, num_stages=1)
        buf18 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 96, 4, 4), (1536, 16, 4, 1))
        buf19 = buf18
        del buf18
        triton_poi_fused_convolution_relu_8[grid(6144)](buf19, primals_13,
            6144, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_13
        buf20 = extern_kernels.convolution(buf19, primals_14, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 96, 4, 4), (1536, 16, 4, 1))
        buf21 = empty_strided_cuda((8,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_9[grid(8)](buf21, 8,
            XBLOCK=8, num_warps=1, num_stages=1)
        buf22 = empty_strided_cuda((4, 160, 8, 8), (10240, 64, 8, 1),
            torch.float32)
        triton_poi_fused_cat_10[grid(40960)](buf21, buf20, primals_15,
            buf12, buf22, 40960, XBLOCK=256, num_warps=4, num_stages=1)
        buf23 = extern_kernels.convolution(buf22, primals_16, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 112, 8, 8), (7168, 64, 8, 1))
        buf24 = buf23
        del buf23
        triton_poi_fused_convolution_relu_11[grid(28672)](buf24,
            primals_17, 28672, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_17
        buf25 = extern_kernels.convolution(buf24, primals_18, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf25, (4, 112, 8, 8), (7168, 64, 8, 1))
        buf26 = empty_strided_cuda((16,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_12[grid(16)](buf26, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf27 = empty_strided_cuda((4, 160, 16, 16), (40960, 256, 16, 1),
            torch.float32)
        triton_poi_fused_cat_13[grid(163840)](buf26, buf25, primals_19,
            buf8, buf27, 163840, XBLOCK=512, num_warps=8, num_stages=1)
        buf28 = extern_kernels.convolution(buf27, primals_20, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 96, 16, 16), (24576, 256, 16, 1))
        buf29 = buf28
        del buf28
        triton_poi_fused_convolution_relu_14[grid(98304)](buf29,
            primals_21, 98304, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_21
        buf30 = extern_kernels.convolution(buf29, primals_22, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf30, (4, 96, 16, 16), (24576, 256, 16, 1))
        buf31 = empty_strided_cuda((32,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_15[grid(32)](buf31, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        buf32 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
            torch.float32)
        triton_poi_fused_cat_16[grid(524288)](buf31, buf30, primals_23,
            buf4, buf32, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        buf33 = extern_kernels.convolution(buf32, primals_24, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf33, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf34 = buf33
        del buf33
        triton_poi_fused_convolution_relu_17[grid(262144)](buf34,
            primals_25, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_25
        buf35 = extern_kernels.convolution(buf34, primals_26, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf36 = empty_strided_cuda((64,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_18[grid(64)](buf36, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf37 = empty_strided_cuda((4, 67, 64, 64), (274432, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_cat_19[grid(1097728)](buf36, buf35, primals_27,
            primals_3, buf37, 1097728, XBLOCK=1024, num_warps=4, num_stages=1)
        buf38 = extern_kernels.convolution(buf37, primals_28, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf39 = buf38
        del buf38
        triton_poi_fused_convolution_relu_20[grid(1048576)](buf39,
            primals_29, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_29
        buf40 = extern_kernels.convolution(buf39, primals_30, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf40, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf41 = buf40
        del buf40
        triton_poi_fused_convolution_relu_0[grid(524288)](buf41,
            primals_31, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_31
        buf42 = extern_kernels.convolution(buf41, primals_32, stride=(1, 1
            ), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf42, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf43 = buf42
        del buf42
        triton_poi_fused_convolution_21[grid(49152)](buf43, primals_33,
            49152, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_33
        buf44 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_22[grid(262144)](
            buf35, primals_27, buf44, 262144, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf35
        del primals_27
        buf45 = empty_strided_cuda((4, 96, 16, 16), (24576, 256, 16, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_23[grid(98304)](
            buf30, primals_23, buf45, 98304, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf30
        del primals_23
        buf46 = empty_strided_cuda((4, 112, 8, 8), (7168, 64, 8, 1), torch
            .bool)
        triton_poi_fused_convolution_relu_threshold_backward_24[grid(28672)](
            buf25, primals_19, buf46, 28672, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf25
        del primals_19
        buf47 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_25[grid(6144)](
            buf20, primals_15, buf47, 6144, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf20
        del primals_15
    return (buf43, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, primals_12, primals_14, primals_16, primals_18,
        primals_20, primals_22, primals_24, primals_26, primals_28,
        primals_30, primals_32, buf1, buf3, buf4, buf5, buf7, buf8, buf9,
        buf11, buf12, buf13, buf15, buf16, buf17, buf19, buf21, buf22,
        buf24, buf26, buf27, buf29, buf31, buf32, buf34, buf36, buf37,
        buf39, buf41, buf44, buf45, buf46, buf47)


def Conv(in_channels, out_channels):
    return nn.Conv2d(in_channels, out_channels, 3, padding=1)


def concat(a, b):
    return torch.cat((a, b), 1)


def pool(x):
    return F.max_pool2d(x, 2, 2)


def relu(x):
    return F.relu(x, inplace=True)


def upsample(x):
    return F.interpolate(x, scale_factor=2, mode='nearest')


class UNetNew(nn.Module):

    def __init__(self, in_channels=3, out_channels=3):
        super(UNetNew, self).__init__()
        ic = in_channels
        ec1 = 32
        ec2 = 48
        ec3 = 64
        ec4 = 80
        ec5 = 96
        dc4 = 112
        dc3 = 96
        dc2 = 64
        dc1a = 64
        dc1b = 32
        oc = out_channels
        self.enc_conv0 = Conv(ic, ec1)
        self.enc_conv1 = Conv(ec1, ec1)
        self.enc_conv2 = Conv(ec1, ec2)
        self.enc_conv3 = Conv(ec2, ec3)
        self.enc_conv4 = Conv(ec3, ec4)
        self.enc_conv5a = Conv(ec4, ec5)
        self.enc_conv5b = Conv(ec5, ec5)
        self.dec_conv4a = Conv(ec5 + ec3, dc4)
        self.dec_conv4b = Conv(dc4, dc4)
        self.dec_conv3a = Conv(dc4 + ec2, dc3)
        self.dec_conv3b = Conv(dc3, dc3)
        self.dec_conv2a = Conv(dc3 + ec1, dc2)
        self.dec_conv2b = Conv(dc2, dc2)
        self.dec_conv1a = Conv(dc2 + ic, dc1a)
        self.dec_conv1b = Conv(dc1a, dc1b)
        self.dec_conv0 = Conv(dc1b, oc)
        self.alignment = 16

    def forward(self, input_0):
        primals_1 = self.enc_conv0.weight
        primals_2 = self.enc_conv0.bias
        primals_4 = self.enc_conv1.weight
        primals_5 = self.enc_conv1.bias
        primals_6 = self.enc_conv2.weight
        primals_7 = self.enc_conv2.bias
        primals_8 = self.enc_conv3.weight
        primals_9 = self.enc_conv3.bias
        primals_10 = self.enc_conv4.weight
        primals_11 = self.enc_conv4.bias
        primals_12 = self.enc_conv5a.weight
        primals_13 = self.enc_conv5a.bias
        primals_14 = self.enc_conv5b.weight
        primals_15 = self.enc_conv5b.bias
        primals_16 = self.dec_conv4a.weight
        primals_17 = self.dec_conv4a.bias
        primals_18 = self.dec_conv4b.weight
        primals_19 = self.dec_conv4b.bias
        primals_20 = self.dec_conv3a.weight
        primals_21 = self.dec_conv3a.bias
        primals_22 = self.dec_conv3b.weight
        primals_23 = self.dec_conv3b.bias
        primals_24 = self.dec_conv2a.weight
        primals_25 = self.dec_conv2a.bias
        primals_26 = self.dec_conv2b.weight
        primals_27 = self.dec_conv2b.bias
        primals_28 = self.dec_conv1a.weight
        primals_29 = self.dec_conv1a.bias
        primals_30 = self.dec_conv1b.weight
        primals_31 = self.dec_conv1b.bias
        primals_32 = self.dec_conv0.weight
        primals_33 = self.dec_conv0.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33])
        return output[0]
arpan-dhatt/oidn
UNet
false
14956
[ "Apache-2.0" ]
1206
9419411ba4b343b475b53587cadd44c83d68dc2a
https://github.com/arpan-dhatt/oidn/tree/9419411ba4b343b475b53587cadd44c83d68dc2a
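A quick way to sanity-check the UNet pair above is a forward smoke test of the fused module: `call()` hard-asserts a contiguous (4, 3, 64, 64) input and produces a (4, 3, 64, 64) output (buf43). A minimal sketch, assuming a CUDA device (the generated `call()` path is CUDA-only) and reusing the `UNetNew` definition from this entry:

import torch

# call() asserts the exact (4, 3, 64, 64) contiguous layout, so we feed
# precisely that shape; the output keeps the same spatial size.
model = UNetNew(in_channels=3, out_channels=3).cuda().eval()
x = torch.rand(4, 3, 64, 64, device='cuda')
with torch.no_grad():
    y = model(x)
print(y.shape)  # torch.Size([4, 3, 64, 64])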
SparsemaxBisect
from torch.autograd import Function
import torch
import torch.nn as nn


def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
    """sparsemax: normalizing sparse transform (a la softmax), via bisection.

    Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.

    Parameters
    ----------
    X : torch.Tensor
        The input tensor.
    dim : int
        The dimension along which to apply sparsemax.
    n_iter : int
        Number of bisection iterations. For float32, 24 iterations should
        suffice for machine precision.
    ensure_sum_one : bool,
        Whether to divide the result by its sum. If false, the result might
        sum to close but not exactly 1, which might cause downstream problems.

    Note: This function does not yet support normalizing along anything except
    the last dimension. Please use transposing and views to achieve more
    general behavior.

    Returns
    -------
    P : torch tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 elementwise.
    """
    return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)


class EntmaxBisectFunction(Function):

    @classmethod
    def _gp(cls, x, alpha):
        return x ** (alpha - 1)

    @classmethod
    def _gp_inv(cls, y, alpha):
        return y ** (1 / (alpha - 1))

    @classmethod
    def _p(cls, X, alpha):
        return cls._gp_inv(torch.clamp(X, min=0), alpha)

    @classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50,
            ensure_sum_one=True):
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
        alpha_shape = list(X.shape)
        alpha_shape[dim] = 1
        alpha = alpha.expand(*alpha_shape)
        ctx.alpha = alpha
        ctx.dim = dim
        d = X.shape[dim]
        X = X * (alpha - 1)
        max_val, _ = X.max(dim=dim, keepdim=True)
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
        dm = tau_hi - tau_lo
        for it in range(n_iter):
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(dim) - 1
            mask = (f_m * f_lo >= 0).unsqueeze(dim)
            tau_lo = torch.where(mask, tau_m, tau_lo)
        if ensure_sum_one:
            p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
        ctx.save_for_backward(p_m)
        return p_m

    @classmethod
    def backward(cls, ctx, dY):
        Y, = ctx.saved_tensors
        gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
        dX = dY * gppr
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        d_alpha = None
        if ctx.needs_input_grad[1]:
            S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
            ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
            Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
            d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
            d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
            d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
        return dX, d_alpha, None, None, None


class SparsemaxBisectFunction(EntmaxBisectFunction):

    @classmethod
    def _gp(cls, x, alpha):
        return x

    @classmethod
    def _gp_inv(cls, y, alpha):
        return y

    @classmethod
    def _p(cls, x, alpha):
        return torch.clamp(x, min=0)

    @classmethod
    def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True):
        return super().forward(ctx, X, alpha=2, dim=dim, n_iter=50,
            ensure_sum_one=True)

    @classmethod
    def backward(cls, ctx, dY):
        Y, = ctx.saved_tensors
        gppr = Y > 0
        dX = dY * gppr
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        return dX, None, None, None


class SparsemaxBisect(nn.Module):

    def __init__(self, dim=-1, n_iter=None):
        """sparsemax: normalizing sparse transform (a la softmax) via bisection

        Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.

        Parameters
        ----------
        dim : int
            The dimension along which to apply sparsemax.
        n_iter : int
            Number of bisection iterations. For float32, 24 iterations should
            suffice for machine precision.
        """
        self.dim = dim
        self.n_iter = n_iter
        super().__init__()

    def forward(self, X):
        return sparsemax_bisect(X, dim=self.dim, n_iter=self.n_iter)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
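Written out, the projection that `sparsemax_bisect` solves and the bracket its `forward` uses (reading the code above with `_gp` the identity, i.e. alpha = 2) are:

\[
\operatorname{sparsemax}(x) = \operatorname*{arg\,min}_{p \ge 0,\ \sum_i p_i = 1} \lVert p - x \rVert_2^2,
\qquad
p_i^\star = \max(x_i - \tau^\star, 0),
\]

where \(\tau^\star\) is the root of \(f(\tau) = \sum_i \max(x_i - \tau, 0) - 1\). Because \(f\) is continuous and nonincreasing, with \(f(\max_i x_i - 1) \ge 0\) and \(f(\max_i x_i - 1/d) \le 0\) for a slice of size \(d\), the root is bracketed by \(\tau_{\mathrm{lo}} = \max_i x_i - 1\) and \(\tau_{\mathrm{hi}} = \max_i x_i - 1/d\), and the `dm /= 2` loop in `EntmaxBisectFunction.forward` is plain bisection on that interval.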
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.autograd import Function import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0(in_out_ptr8, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp9 = 0.25 tmp10 = tmp6 - tmp9 tmp11 = tmp10 - tmp8 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp14 = tmp8 + tmp13 tmp15 = tmp0 - tmp14 tmp16 = 0.0 tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp18 = tmp1 - tmp14 tmp19 = triton_helpers.maximum(tmp18, tmp16) tmp20 = tmp17 + tmp19 tmp21 = tmp3 - tmp14 tmp22 = triton_helpers.maximum(tmp21, tmp16) tmp23 = tmp20 + tmp22 tmp24 = tmp5 - tmp14 tmp25 = triton_helpers.maximum(tmp24, tmp16) tmp26 = tmp23 + tmp25 tmp27 = tmp26 - tmp7 tmp28 = tmp0 - tmp8 tmp29 = triton_helpers.maximum(tmp28, tmp16) tmp30 = tmp1 - tmp8 tmp31 = triton_helpers.maximum(tmp30, tmp16) tmp32 = tmp29 + tmp31 tmp33 = tmp3 - tmp8 tmp34 = triton_helpers.maximum(tmp33, tmp16) tmp35 = tmp32 + tmp34 tmp36 = tmp5 - tmp8 tmp37 = triton_helpers.maximum(tmp36, tmp16) tmp38 = tmp35 + tmp37 tmp39 = tmp38 - tmp7 tmp40 = tmp27 * tmp39 tmp41 = tmp40 >= tmp16 tmp42 = tl.where(tmp41, tmp14, tmp8) tmp43 = tmp13 * tmp12 tmp44 = tmp42 + tmp43 tmp45 = tmp0 - tmp44 tmp46 = triton_helpers.maximum(tmp45, tmp16) tmp47 = tmp1 - tmp44 tmp48 = triton_helpers.maximum(tmp47, tmp16) tmp49 = tmp46 + tmp48 tmp50 = tmp3 - tmp44 tmp51 = triton_helpers.maximum(tmp50, tmp16) tmp52 = tmp49 + tmp51 tmp53 = tmp5 - tmp44 tmp54 = triton_helpers.maximum(tmp53, tmp16) tmp55 = tmp52 + tmp54 tmp56 = tmp55 - tmp7 tmp57 = tmp56 * tmp39 tmp58 = tmp57 >= tmp16 tmp59 = tl.where(tmp58, tmp44, tmp42) tmp60 = tmp43 * tmp12 tmp61 = tmp59 + tmp60 tmp62 = tmp0 - tmp61 tmp63 = triton_helpers.maximum(tmp62, tmp16) tmp64 = tmp1 - tmp61 tmp65 = triton_helpers.maximum(tmp64, tmp16) tmp66 = tmp63 + tmp65 tmp67 = tmp3 - tmp61 tmp68 = triton_helpers.maximum(tmp67, tmp16) tmp69 = tmp66 + tmp68 tmp70 = tmp5 - tmp61 tmp71 = triton_helpers.maximum(tmp70, tmp16) tmp72 = tmp69 + tmp71 tmp73 = tmp72 - tmp7 tmp74 = tmp73 * tmp39 tmp75 = tmp74 >= tmp16 tmp76 = tl.where(tmp75, tmp61, tmp59) tmp77 = tmp60 * tmp12 tmp78 = tmp76 + tmp77 tmp79 = tmp0 - tmp78 tmp80 = triton_helpers.maximum(tmp79, tmp16) tmp81 = tmp1 - tmp78 tmp82 = triton_helpers.maximum(tmp81, tmp16) tmp83 = tmp80 + tmp82 tmp84 = tmp3 - tmp78 tmp85 = triton_helpers.maximum(tmp84, tmp16) tmp86 = tmp83 + tmp85 tmp87 = tmp5 - tmp78 tmp88 = triton_helpers.maximum(tmp87, tmp16) tmp89 = tmp86 + tmp88 tmp90 = tmp89 - tmp7 tmp91 = tmp90 * tmp39 tmp92 = tmp91 >= tmp16 tmp93 = tl.where(tmp92, tmp78, tmp76) tmp94 = tmp77 * tmp12 tmp95 = 
tmp93 + tmp94 tmp96 = tmp0 - tmp95 tmp97 = triton_helpers.maximum(tmp96, tmp16) tmp98 = tmp1 - tmp95 tmp99 = triton_helpers.maximum(tmp98, tmp16) tmp100 = tmp97 + tmp99 tmp101 = tmp3 - tmp95 tmp102 = triton_helpers.maximum(tmp101, tmp16) tmp103 = tmp100 + tmp102 tmp104 = tmp5 - tmp95 tmp105 = triton_helpers.maximum(tmp104, tmp16) tmp106 = tmp103 + tmp105 tmp107 = tmp106 - tmp7 tmp108 = tmp107 * tmp39 tmp109 = tmp108 >= tmp16 tmp110 = tl.where(tmp109, tmp95, tmp93) tmp111 = tmp94 * tmp12 tmp112 = tmp110 + tmp111 tmp113 = tmp0 - tmp112 tmp114 = triton_helpers.maximum(tmp113, tmp16) tmp115 = tmp1 - tmp112 tmp116 = triton_helpers.maximum(tmp115, tmp16) tmp117 = tmp114 + tmp116 tmp118 = tmp3 - tmp112 tmp119 = triton_helpers.maximum(tmp118, tmp16) tmp120 = tmp117 + tmp119 tmp121 = tmp5 - tmp112 tmp122 = triton_helpers.maximum(tmp121, tmp16) tmp123 = tmp120 + tmp122 tmp124 = tmp123 - tmp7 tmp125 = tmp124 * tmp39 tmp126 = tmp125 >= tmp16 tmp127 = tl.where(tmp126, tmp112, tmp110) tmp128 = tmp111 * tmp12 tmp129 = tmp127 + tmp128 tmp130 = tmp0 - tmp129 tmp131 = triton_helpers.maximum(tmp130, tmp16) tmp132 = tmp1 - tmp129 tmp133 = triton_helpers.maximum(tmp132, tmp16) tmp134 = tmp131 + tmp133 tmp135 = tmp3 - tmp129 tmp136 = triton_helpers.maximum(tmp135, tmp16) tmp137 = tmp134 + tmp136 tmp138 = tmp5 - tmp129 tmp139 = triton_helpers.maximum(tmp138, tmp16) tmp140 = tmp137 + tmp139 tmp141 = tmp140 - tmp7 tmp142 = tmp141 * tmp39 tmp143 = tmp142 >= tmp16 tmp144 = tl.where(tmp143, tmp129, tmp127) tmp145 = tmp128 * tmp12 tmp146 = tmp144 + tmp145 tmp147 = tmp0 - tmp146 tmp148 = triton_helpers.maximum(tmp147, tmp16) tmp149 = tmp1 - tmp146 tmp150 = triton_helpers.maximum(tmp149, tmp16) tmp151 = tmp148 + tmp150 tmp152 = tmp3 - tmp146 tmp153 = triton_helpers.maximum(tmp152, tmp16) tmp154 = tmp151 + tmp153 tmp155 = tmp5 - tmp146 tmp156 = triton_helpers.maximum(tmp155, tmp16) tmp157 = tmp154 + tmp156 tmp158 = tmp157 - tmp7 tmp159 = tmp158 * tmp39 tmp160 = tmp159 >= tmp16 tmp161 = tl.where(tmp160, tmp146, tmp144) tmp162 = tmp145 * tmp12 tmp163 = tmp161 + tmp162 tmp164 = tmp0 - tmp163 tmp165 = triton_helpers.maximum(tmp164, tmp16) tmp166 = tmp1 - tmp163 tmp167 = triton_helpers.maximum(tmp166, tmp16) tmp168 = tmp165 + tmp167 tmp169 = tmp3 - tmp163 tmp170 = triton_helpers.maximum(tmp169, tmp16) tmp171 = tmp168 + tmp170 tmp172 = tmp5 - tmp163 tmp173 = triton_helpers.maximum(tmp172, tmp16) tmp174 = tmp171 + tmp173 tmp175 = tmp174 - tmp7 tmp176 = tmp175 * tmp39 tmp177 = tmp176 >= tmp16 tmp178 = tl.where(tmp177, tmp163, tmp161) tmp179 = tmp162 * tmp12 tmp180 = tmp178 + tmp179 tmp181 = tmp0 - tmp180 tmp182 = triton_helpers.maximum(tmp181, tmp16) tmp183 = tmp1 - tmp180 tmp184 = triton_helpers.maximum(tmp183, tmp16) tmp185 = tmp182 + tmp184 tmp186 = tmp3 - tmp180 tmp187 = triton_helpers.maximum(tmp186, tmp16) tmp188 = tmp185 + tmp187 tmp189 = tmp5 - tmp180 tmp190 = triton_helpers.maximum(tmp189, tmp16) tmp191 = tmp188 + tmp190 tmp192 = tmp191 - tmp7 tmp193 = tmp192 * tmp39 tmp194 = tmp193 >= tmp16 tmp195 = tl.where(tmp194, tmp180, tmp178) tmp196 = tmp179 * tmp12 tmp197 = tmp195 + tmp196 tmp198 = tmp0 - tmp197 tmp199 = triton_helpers.maximum(tmp198, tmp16) tmp200 = tmp1 - tmp197 tmp201 = triton_helpers.maximum(tmp200, tmp16) tmp202 = tmp199 + tmp201 tmp203 = tmp3 - tmp197 tmp204 = triton_helpers.maximum(tmp203, tmp16) tmp205 = tmp202 + tmp204 tmp206 = tmp5 - tmp197 tmp207 = triton_helpers.maximum(tmp206, tmp16) tmp208 = tmp205 + tmp207 tmp209 = tmp208 - tmp7 tmp210 = tmp209 * tmp39 tmp211 = tmp210 >= tmp16 tmp212 = 
tl.where(tmp211, tmp197, tmp195) tmp213 = tmp196 * tmp12 tmp214 = tmp212 + tmp213 tmp215 = tmp0 - tmp214 tmp216 = triton_helpers.maximum(tmp215, tmp16) tmp217 = tmp1 - tmp214 tmp218 = triton_helpers.maximum(tmp217, tmp16) tmp219 = tmp216 + tmp218 tmp220 = tmp3 - tmp214 tmp221 = triton_helpers.maximum(tmp220, tmp16) tmp222 = tmp219 + tmp221 tmp223 = tmp5 - tmp214 tmp224 = triton_helpers.maximum(tmp223, tmp16) tmp225 = tmp222 + tmp224 tmp226 = tmp225 - tmp7 tmp227 = tmp226 * tmp39 tmp228 = tmp227 >= tmp16 tmp229 = tl.where(tmp228, tmp214, tmp212) tmp230 = tmp213 * tmp12 tmp231 = tmp229 + tmp230 tmp232 = tmp0 - tmp231 tmp233 = triton_helpers.maximum(tmp232, tmp16) tmp234 = tmp1 - tmp231 tmp235 = triton_helpers.maximum(tmp234, tmp16) tmp236 = tmp233 + tmp235 tmp237 = tmp3 - tmp231 tmp238 = triton_helpers.maximum(tmp237, tmp16) tmp239 = tmp236 + tmp238 tmp240 = tmp5 - tmp231 tmp241 = triton_helpers.maximum(tmp240, tmp16) tmp242 = tmp239 + tmp241 tmp243 = tmp242 - tmp7 tmp244 = tmp243 * tmp39 tmp245 = tmp244 >= tmp16 tmp246 = tl.where(tmp245, tmp231, tmp229) tmp247 = tmp230 * tmp12 tmp248 = tmp246 + tmp247 tmp249 = tmp0 - tmp248 tmp250 = triton_helpers.maximum(tmp249, tmp16) tmp251 = tmp1 - tmp248 tmp252 = triton_helpers.maximum(tmp251, tmp16) tmp253 = tmp250 + tmp252 tmp254 = tmp3 - tmp248 tmp255 = triton_helpers.maximum(tmp254, tmp16) tmp256 = tmp253 + tmp255 tmp257 = tmp5 - tmp248 tmp258 = triton_helpers.maximum(tmp257, tmp16) tmp259 = tmp256 + tmp258 tmp260 = tmp259 - tmp7 tmp261 = tmp260 * tmp39 tmp262 = tmp261 >= tmp16 tmp263 = tl.where(tmp262, tmp248, tmp246) tmp264 = tmp247 * tmp12 tmp265 = tmp263 + tmp264 tmp266 = tmp0 - tmp265 tmp267 = triton_helpers.maximum(tmp266, tmp16) tmp268 = tmp1 - tmp265 tmp269 = triton_helpers.maximum(tmp268, tmp16) tmp270 = tmp267 + tmp269 tmp271 = tmp3 - tmp265 tmp272 = triton_helpers.maximum(tmp271, tmp16) tmp273 = tmp270 + tmp272 tmp274 = tmp5 - tmp265 tmp275 = triton_helpers.maximum(tmp274, tmp16) tmp276 = tmp273 + tmp275 tmp277 = tmp276 - tmp7 tmp278 = tmp277 * tmp39 tmp279 = tmp278 >= tmp16 tmp280 = tl.where(tmp279, tmp265, tmp263) tmp281 = tmp264 * tmp12 tmp282 = tmp280 + tmp281 tmp283 = tmp0 - tmp282 tmp284 = triton_helpers.maximum(tmp283, tmp16) tmp285 = tmp1 - tmp282 tmp286 = triton_helpers.maximum(tmp285, tmp16) tmp287 = tmp284 + tmp286 tmp288 = tmp3 - tmp282 tmp289 = triton_helpers.maximum(tmp288, tmp16) tmp290 = tmp287 + tmp289 tmp291 = tmp5 - tmp282 tmp292 = triton_helpers.maximum(tmp291, tmp16) tmp293 = tmp290 + tmp292 tmp294 = tmp293 - tmp7 tmp295 = tmp294 * tmp39 tmp296 = tmp295 >= tmp16 tmp297 = tl.where(tmp296, tmp282, tmp280) tmp298 = tmp281 * tmp12 tmp299 = tmp297 + tmp298 tmp300 = tmp0 - tmp299 tmp301 = triton_helpers.maximum(tmp300, tmp16) tmp302 = tmp1 - tmp299 tmp303 = triton_helpers.maximum(tmp302, tmp16) tmp304 = tmp301 + tmp303 tmp305 = tmp3 - tmp299 tmp306 = triton_helpers.maximum(tmp305, tmp16) tmp307 = tmp304 + tmp306 tmp308 = tmp5 - tmp299 tmp309 = triton_helpers.maximum(tmp308, tmp16) tmp310 = tmp307 + tmp309 tmp311 = tmp310 - tmp7 tmp312 = tmp311 * tmp39 tmp313 = tmp312 >= tmp16 tmp314 = tl.where(tmp313, tmp299, tmp297) tl.store(in_out_ptr8 + x0, tmp314, xmask) @triton.jit def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1(in_out_ptr16, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 
4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + x0, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 0.25 tmp8 = tmp6 - tmp7 tmp9 = 1.0 tmp10 = tmp6 - tmp9 tmp11 = tmp8 - tmp10 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp14 = tmp13 * tmp12 tmp15 = tmp14 * tmp12 tmp16 = tmp15 * tmp12 tmp17 = tmp16 * tmp12 tmp18 = tmp17 * tmp12 tmp19 = tmp18 * tmp12 tmp20 = tmp19 * tmp12 tmp21 = tmp20 * tmp12 tmp22 = tmp21 * tmp12 tmp23 = tmp22 * tmp12 tmp24 = tmp23 * tmp12 tmp25 = tmp24 * tmp12 tmp26 = tmp25 * tmp12 tmp27 = tmp26 * tmp12 tmp28 = tmp27 * tmp12 tmp29 = tmp28 * tmp12 tmp30 = tmp29 * tmp12 tmp32 = tmp31 + tmp30 tmp33 = tmp0 - tmp32 tmp34 = 0.0 tmp35 = triton_helpers.maximum(tmp33, tmp34) tmp36 = tmp1 - tmp32 tmp37 = triton_helpers.maximum(tmp36, tmp34) tmp38 = tmp35 + tmp37 tmp39 = tmp3 - tmp32 tmp40 = triton_helpers.maximum(tmp39, tmp34) tmp41 = tmp38 + tmp40 tmp42 = tmp5 - tmp32 tmp43 = triton_helpers.maximum(tmp42, tmp34) tmp44 = tmp41 + tmp43 tmp45 = tmp44 - tmp9 tmp46 = tmp0 - tmp10 tmp47 = triton_helpers.maximum(tmp46, tmp34) tmp48 = tmp1 - tmp10 tmp49 = triton_helpers.maximum(tmp48, tmp34) tmp50 = tmp47 + tmp49 tmp51 = tmp3 - tmp10 tmp52 = triton_helpers.maximum(tmp51, tmp34) tmp53 = tmp50 + tmp52 tmp54 = tmp5 - tmp10 tmp55 = triton_helpers.maximum(tmp54, tmp34) tmp56 = tmp53 + tmp55 tmp57 = tmp56 - tmp9 tmp58 = tmp45 * tmp57 tmp59 = tmp58 >= tmp34 tmp60 = tl.where(tmp59, tmp32, tmp31) tmp61 = tmp30 * tmp12 tmp62 = tmp60 + tmp61 tmp63 = tmp0 - tmp62 tmp64 = triton_helpers.maximum(tmp63, tmp34) tmp65 = tmp1 - tmp62 tmp66 = triton_helpers.maximum(tmp65, tmp34) tmp67 = tmp64 + tmp66 tmp68 = tmp3 - tmp62 tmp69 = triton_helpers.maximum(tmp68, tmp34) tmp70 = tmp67 + tmp69 tmp71 = tmp5 - tmp62 tmp72 = triton_helpers.maximum(tmp71, tmp34) tmp73 = tmp70 + tmp72 tmp74 = tmp73 - tmp9 tmp75 = tmp74 * tmp57 tmp76 = tmp75 >= tmp34 tmp77 = tl.where(tmp76, tmp62, tmp60) tmp78 = tmp61 * tmp12 tmp79 = tmp77 + tmp78 tmp80 = tmp0 - tmp79 tmp81 = triton_helpers.maximum(tmp80, tmp34) tmp82 = tmp1 - tmp79 tmp83 = triton_helpers.maximum(tmp82, tmp34) tmp84 = tmp81 + tmp83 tmp85 = tmp3 - tmp79 tmp86 = triton_helpers.maximum(tmp85, tmp34) tmp87 = tmp84 + tmp86 tmp88 = tmp5 - tmp79 tmp89 = triton_helpers.maximum(tmp88, tmp34) tmp90 = tmp87 + tmp89 tmp91 = tmp90 - tmp9 tmp92 = tmp91 * tmp57 tmp93 = tmp92 >= tmp34 tmp94 = tl.where(tmp93, tmp79, tmp77) tmp95 = tmp78 * tmp12 tmp96 = tmp94 + tmp95 tmp97 = tmp0 - tmp96 tmp98 = triton_helpers.maximum(tmp97, tmp34) tmp99 = tmp1 - tmp96 tmp100 = triton_helpers.maximum(tmp99, tmp34) tmp101 = tmp98 + tmp100 tmp102 = tmp3 - tmp96 tmp103 = triton_helpers.maximum(tmp102, tmp34) tmp104 = tmp101 + tmp103 tmp105 = tmp5 - tmp96 tmp106 = triton_helpers.maximum(tmp105, tmp34) tmp107 = tmp104 + tmp106 tmp108 = tmp107 - tmp9 tmp109 = tmp108 * tmp57 tmp110 = tmp109 >= tmp34 tmp111 = tl.where(tmp110, tmp96, tmp94) tmp112 = tmp95 * tmp12 tmp113 = tmp111 + tmp112 tmp114 = tmp0 - tmp113 tmp115 = triton_helpers.maximum(tmp114, tmp34) tmp116 = tmp1 - tmp113 tmp117 = triton_helpers.maximum(tmp116, tmp34) tmp118 = tmp115 + tmp117 tmp119 = tmp3 - tmp113 tmp120 = triton_helpers.maximum(tmp119, tmp34) tmp121 = tmp118 + tmp120 tmp122 = tmp5 - tmp113 tmp123 = triton_helpers.maximum(tmp122, tmp34) tmp124 = tmp121 + tmp123 tmp125 = tmp124 - tmp9 
tmp126 = tmp125 * tmp57 tmp127 = tmp126 >= tmp34 tmp128 = tl.where(tmp127, tmp113, tmp111) tmp129 = tmp112 * tmp12 tmp130 = tmp128 + tmp129 tmp131 = tmp0 - tmp130 tmp132 = triton_helpers.maximum(tmp131, tmp34) tmp133 = tmp1 - tmp130 tmp134 = triton_helpers.maximum(tmp133, tmp34) tmp135 = tmp132 + tmp134 tmp136 = tmp3 - tmp130 tmp137 = triton_helpers.maximum(tmp136, tmp34) tmp138 = tmp135 + tmp137 tmp139 = tmp5 - tmp130 tmp140 = triton_helpers.maximum(tmp139, tmp34) tmp141 = tmp138 + tmp140 tmp142 = tmp141 - tmp9 tmp143 = tmp142 * tmp57 tmp144 = tmp143 >= tmp34 tmp145 = tl.where(tmp144, tmp130, tmp128) tmp146 = tmp129 * tmp12 tmp147 = tmp145 + tmp146 tmp148 = tmp0 - tmp147 tmp149 = triton_helpers.maximum(tmp148, tmp34) tmp150 = tmp1 - tmp147 tmp151 = triton_helpers.maximum(tmp150, tmp34) tmp152 = tmp149 + tmp151 tmp153 = tmp3 - tmp147 tmp154 = triton_helpers.maximum(tmp153, tmp34) tmp155 = tmp152 + tmp154 tmp156 = tmp5 - tmp147 tmp157 = triton_helpers.maximum(tmp156, tmp34) tmp158 = tmp155 + tmp157 tmp159 = tmp158 - tmp9 tmp160 = tmp159 * tmp57 tmp161 = tmp160 >= tmp34 tmp162 = tl.where(tmp161, tmp147, tmp145) tmp163 = tmp146 * tmp12 tmp164 = tmp162 + tmp163 tmp165 = tmp0 - tmp164 tmp166 = triton_helpers.maximum(tmp165, tmp34) tmp167 = tmp1 - tmp164 tmp168 = triton_helpers.maximum(tmp167, tmp34) tmp169 = tmp166 + tmp168 tmp170 = tmp3 - tmp164 tmp171 = triton_helpers.maximum(tmp170, tmp34) tmp172 = tmp169 + tmp171 tmp173 = tmp5 - tmp164 tmp174 = triton_helpers.maximum(tmp173, tmp34) tmp175 = tmp172 + tmp174 tmp176 = tmp175 - tmp9 tmp177 = tmp176 * tmp57 tmp178 = tmp177 >= tmp34 tmp179 = tl.where(tmp178, tmp164, tmp162) tmp180 = tmp163 * tmp12 tmp181 = tmp179 + tmp180 tmp182 = tmp0 - tmp181 tmp183 = triton_helpers.maximum(tmp182, tmp34) tmp184 = tmp1 - tmp181 tmp185 = triton_helpers.maximum(tmp184, tmp34) tmp186 = tmp183 + tmp185 tmp187 = tmp3 - tmp181 tmp188 = triton_helpers.maximum(tmp187, tmp34) tmp189 = tmp186 + tmp188 tmp190 = tmp5 - tmp181 tmp191 = triton_helpers.maximum(tmp190, tmp34) tmp192 = tmp189 + tmp191 tmp193 = tmp192 - tmp9 tmp194 = tmp193 * tmp57 tmp195 = tmp194 >= tmp34 tmp196 = tl.where(tmp195, tmp181, tmp179) tmp197 = tmp180 * tmp12 tmp198 = tmp196 + tmp197 tmp199 = tmp0 - tmp198 tmp200 = triton_helpers.maximum(tmp199, tmp34) tmp201 = tmp1 - tmp198 tmp202 = triton_helpers.maximum(tmp201, tmp34) tmp203 = tmp200 + tmp202 tmp204 = tmp3 - tmp198 tmp205 = triton_helpers.maximum(tmp204, tmp34) tmp206 = tmp203 + tmp205 tmp207 = tmp5 - tmp198 tmp208 = triton_helpers.maximum(tmp207, tmp34) tmp209 = tmp206 + tmp208 tmp210 = tmp209 - tmp9 tmp211 = tmp210 * tmp57 tmp212 = tmp211 >= tmp34 tmp213 = tl.where(tmp212, tmp198, tmp196) tmp214 = tmp197 * tmp12 tmp215 = tmp213 + tmp214 tmp216 = tmp0 - tmp215 tmp217 = triton_helpers.maximum(tmp216, tmp34) tmp218 = tmp1 - tmp215 tmp219 = triton_helpers.maximum(tmp218, tmp34) tmp220 = tmp217 + tmp219 tmp221 = tmp3 - tmp215 tmp222 = triton_helpers.maximum(tmp221, tmp34) tmp223 = tmp220 + tmp222 tmp224 = tmp5 - tmp215 tmp225 = triton_helpers.maximum(tmp224, tmp34) tmp226 = tmp223 + tmp225 tmp227 = tmp226 - tmp9 tmp228 = tmp227 * tmp57 tmp229 = tmp228 >= tmp34 tmp230 = tl.where(tmp229, tmp215, tmp213) tmp231 = tmp214 * tmp12 tmp232 = tmp230 + tmp231 tmp233 = tmp0 - tmp232 tmp234 = triton_helpers.maximum(tmp233, tmp34) tmp235 = tmp1 - tmp232 tmp236 = triton_helpers.maximum(tmp235, tmp34) tmp237 = tmp234 + tmp236 tmp238 = tmp3 - tmp232 tmp239 = triton_helpers.maximum(tmp238, tmp34) tmp240 = tmp237 + tmp239 tmp241 = tmp5 - tmp232 tmp242 = 
triton_helpers.maximum(tmp241, tmp34) tmp243 = tmp240 + tmp242 tmp244 = tmp243 - tmp9 tmp245 = tmp244 * tmp57 tmp246 = tmp245 >= tmp34 tmp247 = tl.where(tmp246, tmp232, tmp230) tmp248 = tmp231 * tmp12 tmp249 = tmp247 + tmp248 tmp250 = tmp0 - tmp249 tmp251 = triton_helpers.maximum(tmp250, tmp34) tmp252 = tmp1 - tmp249 tmp253 = triton_helpers.maximum(tmp252, tmp34) tmp254 = tmp251 + tmp253 tmp255 = tmp3 - tmp249 tmp256 = triton_helpers.maximum(tmp255, tmp34) tmp257 = tmp254 + tmp256 tmp258 = tmp5 - tmp249 tmp259 = triton_helpers.maximum(tmp258, tmp34) tmp260 = tmp257 + tmp259 tmp261 = tmp260 - tmp9 tmp262 = tmp261 * tmp57 tmp263 = tmp262 >= tmp34 tmp264 = tl.where(tmp263, tmp249, tmp247) tmp265 = tmp248 * tmp12 tmp266 = tmp264 + tmp265 tmp267 = tmp0 - tmp266 tmp268 = triton_helpers.maximum(tmp267, tmp34) tmp269 = tmp1 - tmp266 tmp270 = triton_helpers.maximum(tmp269, tmp34) tmp271 = tmp268 + tmp270 tmp272 = tmp3 - tmp266 tmp273 = triton_helpers.maximum(tmp272, tmp34) tmp274 = tmp271 + tmp273 tmp275 = tmp5 - tmp266 tmp276 = triton_helpers.maximum(tmp275, tmp34) tmp277 = tmp274 + tmp276 tmp278 = tmp277 - tmp9 tmp279 = tmp278 * tmp57 tmp280 = tmp279 >= tmp34 tmp281 = tl.where(tmp280, tmp266, tmp264) tmp282 = tmp265 * tmp12 tmp283 = tmp281 + tmp282 tmp284 = tmp0 - tmp283 tmp285 = triton_helpers.maximum(tmp284, tmp34) tmp286 = tmp1 - tmp283 tmp287 = triton_helpers.maximum(tmp286, tmp34) tmp288 = tmp285 + tmp287 tmp289 = tmp3 - tmp283 tmp290 = triton_helpers.maximum(tmp289, tmp34) tmp291 = tmp288 + tmp290 tmp292 = tmp5 - tmp283 tmp293 = triton_helpers.maximum(tmp292, tmp34) tmp294 = tmp291 + tmp293 tmp295 = tmp294 - tmp9 tmp296 = tmp295 * tmp57 tmp297 = tmp296 >= tmp34 tmp298 = tl.where(tmp297, tmp283, tmp281) tmp299 = tmp282 * tmp12 tmp300 = tmp298 + tmp299 tmp301 = tmp0 - tmp300 tmp302 = triton_helpers.maximum(tmp301, tmp34) tmp303 = tmp1 - tmp300 tmp304 = triton_helpers.maximum(tmp303, tmp34) tmp305 = tmp302 + tmp304 tmp306 = tmp3 - tmp300 tmp307 = triton_helpers.maximum(tmp306, tmp34) tmp308 = tmp305 + tmp307 tmp309 = tmp5 - tmp300 tmp310 = triton_helpers.maximum(tmp309, tmp34) tmp311 = tmp308 + tmp310 tmp312 = tmp311 - tmp9 tmp313 = tmp312 * tmp57 tmp314 = tmp313 >= tmp34 tmp315 = tl.where(tmp314, tmp300, tmp298) tmp316 = tmp299 * tmp12 tmp317 = tmp315 + tmp316 tmp318 = tmp0 - tmp317 tmp319 = triton_helpers.maximum(tmp318, tmp34) tmp320 = tmp1 - tmp317 tmp321 = triton_helpers.maximum(tmp320, tmp34) tmp322 = tmp319 + tmp321 tmp323 = tmp3 - tmp317 tmp324 = triton_helpers.maximum(tmp323, tmp34) tmp325 = tmp322 + tmp324 tmp326 = tmp5 - tmp317 tmp327 = triton_helpers.maximum(tmp326, tmp34) tmp328 = tmp325 + tmp327 tmp329 = tmp328 - tmp9 tmp330 = tmp329 * tmp57 tmp331 = tmp330 >= tmp34 tmp332 = tl.where(tmp331, tmp317, tmp315) tmp333 = tmp316 * tmp12 tmp334 = tmp332 + tmp333 tmp335 = tmp0 - tmp334 tmp336 = triton_helpers.maximum(tmp335, tmp34) tmp337 = tmp1 - tmp334 tmp338 = triton_helpers.maximum(tmp337, tmp34) tmp339 = tmp336 + tmp338 tmp340 = tmp3 - tmp334 tmp341 = triton_helpers.maximum(tmp340, tmp34) tmp342 = tmp339 + tmp341 tmp343 = tmp5 - tmp334 tmp344 = triton_helpers.maximum(tmp343, tmp34) tmp345 = tmp342 + tmp344 tmp346 = tmp345 - tmp9 tmp347 = tmp346 * tmp57 tmp348 = tmp347 >= tmp34 tmp349 = tl.where(tmp348, tmp334, tmp332) tmp350 = tmp333 * tmp12 tmp351 = tmp349 + tmp350 tmp352 = tmp0 - tmp351 tmp353 = triton_helpers.maximum(tmp352, tmp34) tmp354 = tmp1 - tmp351 tmp355 = triton_helpers.maximum(tmp354, tmp34) tmp356 = tmp353 + tmp355 tmp357 = tmp3 - tmp351 tmp358 = 
triton_helpers.maximum(tmp357, tmp34) tmp359 = tmp356 + tmp358 tmp360 = tmp5 - tmp351 tmp361 = triton_helpers.maximum(tmp360, tmp34) tmp362 = tmp359 + tmp361 tmp363 = tmp362 - tmp9 tmp364 = tmp363 * tmp57 tmp365 = tmp364 >= tmp34 tmp366 = tl.where(tmp365, tmp351, tmp349) tmp367 = tmp350 * tmp12 tmp368 = tmp366 + tmp367 tmp369 = tmp0 - tmp368 tmp370 = triton_helpers.maximum(tmp369, tmp34) tmp371 = tmp1 - tmp368 tmp372 = triton_helpers.maximum(tmp371, tmp34) tmp373 = tmp370 + tmp372 tmp374 = tmp3 - tmp368 tmp375 = triton_helpers.maximum(tmp374, tmp34) tmp376 = tmp373 + tmp375 tmp377 = tmp5 - tmp368 tmp378 = triton_helpers.maximum(tmp377, tmp34) tmp379 = tmp376 + tmp378 tmp380 = tmp379 - tmp9 tmp381 = tmp380 * tmp57 tmp382 = tmp381 >= tmp34 tmp383 = tl.where(tmp382, tmp368, tmp366) tmp384 = tmp367 * tmp12 tmp385 = tmp383 + tmp384 tmp386 = tmp0 - tmp385 tmp387 = triton_helpers.maximum(tmp386, tmp34) tmp388 = tmp1 - tmp385 tmp389 = triton_helpers.maximum(tmp388, tmp34) tmp390 = tmp387 + tmp389 tmp391 = tmp3 - tmp385 tmp392 = triton_helpers.maximum(tmp391, tmp34) tmp393 = tmp390 + tmp392 tmp394 = tmp5 - tmp385 tmp395 = triton_helpers.maximum(tmp394, tmp34) tmp396 = tmp393 + tmp395 tmp397 = tmp396 - tmp9 tmp398 = tmp397 * tmp57 tmp399 = tmp398 >= tmp34 tmp400 = tl.where(tmp399, tmp385, tmp383) tmp401 = tmp384 * tmp12 tmp402 = tmp400 + tmp401 tmp403 = tmp0 - tmp402 tmp404 = triton_helpers.maximum(tmp403, tmp34) tmp405 = tmp1 - tmp402 tmp406 = triton_helpers.maximum(tmp405, tmp34) tmp407 = tmp404 + tmp406 tmp408 = tmp3 - tmp402 tmp409 = triton_helpers.maximum(tmp408, tmp34) tmp410 = tmp407 + tmp409 tmp411 = tmp5 - tmp402 tmp412 = triton_helpers.maximum(tmp411, tmp34) tmp413 = tmp410 + tmp412 tmp414 = tmp413 - tmp9 tmp415 = tmp414 * tmp57 tmp416 = tmp415 >= tmp34 tmp417 = tl.where(tmp416, tmp402, tmp400) tmp418 = tmp401 * tmp12 tmp419 = tmp417 + tmp418 tmp420 = tmp0 - tmp419 tmp421 = triton_helpers.maximum(tmp420, tmp34) tmp422 = tmp1 - tmp419 tmp423 = triton_helpers.maximum(tmp422, tmp34) tmp424 = tmp421 + tmp423 tmp425 = tmp3 - tmp419 tmp426 = triton_helpers.maximum(tmp425, tmp34) tmp427 = tmp424 + tmp426 tmp428 = tmp5 - tmp419 tmp429 = triton_helpers.maximum(tmp428, tmp34) tmp430 = tmp427 + tmp429 tmp431 = tmp430 - tmp9 tmp432 = tmp431 * tmp57 tmp433 = tmp432 >= tmp34 tmp434 = tl.where(tmp433, tmp419, tmp417) tl.store(out_ptr0 + x0, tmp30, xmask) tl.store(in_out_ptr16 + x0, tmp434, xmask) @triton.jit def triton_poi_fused_add_clamp_div_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp3 tmp6 = tmp5 * tmp3 tmp7 = tmp6 * tmp3 tmp8 = tmp7 * tmp3 tmp9 = tmp8 * tmp3 tmp10 = tmp9 * tmp3 tmp11 = tmp10 * tmp3 tmp12 = tmp11 * tmp3 tmp13 = tmp12 * tmp3 tmp14 = tmp13 * tmp3 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp3 tmp17 = tmp16 * tmp3 tmp18 = tmp17 * tmp3 tmp19 = tmp18 * tmp3 tmp20 = tmp19 * tmp3 tmp21 = tmp20 * tmp3 tmp22 = tmp21 * tmp3 tmp23 = tmp22 * tmp3 tmp24 = tmp23 * tmp3 tmp25 = tmp24 * tmp3 tmp26 = tmp25 * tmp3 tmp27 = tmp1 + tmp26 tmp28 = tmp0 - tmp27 tmp29 = 0.0 tmp30 = triton_helpers.maximum(tmp28, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def 
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_out_ptr0 + x0, xmask) tmp33 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp15 - tmp7 tmp17 = tmp9 - tmp16 tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp10 - tmp16 tmp21 = triton_helpers.maximum(tmp20, tmp18) tmp22 = tmp19 + tmp21 tmp23 = tmp12 - tmp16 tmp24 = triton_helpers.maximum(tmp23, tmp18) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp16 tmp27 = triton_helpers.maximum(tmp26, tmp18) tmp28 = tmp25 + tmp27 tmp29 = tmp28 - tmp7 tmp30 = tmp8 * tmp29 tmp31 = tmp30 >= tmp18 tmp34 = 0.5 tmp35 = tmp33 * tmp34 tmp36 = tmp35 * tmp34 tmp37 = tmp36 * tmp34 tmp38 = tmp37 * tmp34 tmp39 = tmp38 * tmp34 tmp40 = tmp39 * tmp34 tmp41 = tmp40 * tmp34 tmp42 = tmp41 * tmp34 tmp43 = tmp42 * tmp34 tmp44 = tmp43 * tmp34 tmp45 = tmp44 * tmp34 tmp46 = tmp45 * tmp34 tmp47 = tmp46 * tmp34 tmp48 = tmp47 * tmp34 tmp49 = tmp48 * tmp34 tmp50 = tmp49 * tmp34 tmp51 = tmp50 * tmp34 tmp52 = tmp51 * tmp34 tmp53 = tmp52 * tmp34 tmp54 = tmp53 * tmp34 tmp55 = tmp54 * tmp34 tmp56 = tmp55 * tmp34 tmp57 = tmp56 * tmp34 tmp58 = tmp32 + tmp57 tmp59 = tl.where(tmp31, tmp58, tmp32) tl.store(in_out_ptr0 + x0, tmp59, xmask) @triton.jit def triton_poi_fused_add_clamp_div_sub_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp3 tmp6 = tmp5 * tmp3 tmp7 = tmp6 * tmp3 tmp8 = tmp7 * tmp3 tmp9 = tmp8 * tmp3 tmp10 = tmp9 * tmp3 tmp11 = tmp10 * tmp3 tmp12 = tmp11 * tmp3 tmp13 = tmp12 * tmp3 tmp14 = tmp13 * tmp3 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp3 tmp17 = tmp16 * tmp3 tmp18 = tmp17 * tmp3 tmp19 = tmp18 * tmp3 tmp20 = tmp19 * tmp3 tmp21 = tmp20 * tmp3 tmp22 = tmp21 * tmp3 tmp23 = tmp22 * tmp3 tmp24 = tmp23 * tmp3 tmp25 = tmp24 * tmp3 tmp26 = tmp25 * tmp3 tmp27 = tmp26 * tmp3 tmp28 = tmp1 + tmp27 tmp29 = tmp0 - tmp28 tmp30 = 0.0 tmp31 = triton_helpers.maximum(tmp29, tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_out_ptr0 + x0, xmask) tmp33 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp15 - tmp7 tmp17 = tmp9 - tmp16 tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp10 - tmp16 tmp21 = triton_helpers.maximum(tmp20, tmp18) tmp22 = tmp19 + tmp21 tmp23 = tmp12 - tmp16 tmp24 = triton_helpers.maximum(tmp23, tmp18) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp16 tmp27 = triton_helpers.maximum(tmp26, tmp18) tmp28 = tmp25 + tmp27 tmp29 = tmp28 - tmp7 tmp30 = tmp8 * tmp29 tmp31 = tmp30 >= tmp18 tmp34 = 0.5 tmp35 = tmp33 * tmp34 tmp36 = tmp35 * tmp34 tmp37 = tmp36 * tmp34 tmp38 = tmp37 * tmp34 tmp39 = tmp38 * tmp34 tmp40 = tmp39 * tmp34 tmp41 = tmp40 * tmp34 tmp42 = tmp41 * tmp34 tmp43 = tmp42 * tmp34 tmp44 = tmp43 * tmp34 tmp45 = tmp44 * tmp34 tmp46 = tmp45 * tmp34 tmp47 = tmp46 * tmp34 tmp48 = tmp47 * tmp34 tmp49 = tmp48 * tmp34 tmp50 = tmp49 * tmp34 tmp51 = tmp50 * tmp34 tmp52 = tmp51 * tmp34 tmp53 = tmp52 * tmp34 tmp54 = tmp53 * tmp34 tmp55 = tmp54 * tmp34 tmp56 = tmp55 * tmp34 tmp57 = tmp56 * tmp34 tmp58 = tmp57 * tmp34 tmp59 = tmp32 + tmp58 tmp60 = tl.where(tmp31, tmp59, tmp32) tl.store(in_out_ptr0 + x0, tmp60, xmask) @triton.jit def triton_poi_fused_add_div_sub_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp3 tmp6 = tmp5 * tmp3 tmp7 = tmp6 * tmp3 tmp8 = tmp7 * tmp3 tmp9 = tmp8 * tmp3 tmp10 = tmp9 * tmp3 tmp11 = tmp10 * tmp3 tmp12 = tmp11 * tmp3 tmp13 = tmp12 * tmp3 tmp14 = tmp13 * tmp3 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp3 tmp17 = tmp16 * tmp3 tmp18 = tmp17 * tmp3 tmp19 = tmp18 * tmp3 tmp20 = tmp19 * tmp3 tmp21 = tmp20 * tmp3 tmp22 = tmp21 * tmp3 tmp23 = tmp22 * tmp3 tmp24 = tmp23 * tmp3 tmp25 = tmp24 * tmp3 tmp26 = tmp25 * tmp3 tmp27 = tmp26 * tmp3 tmp28 = tmp27 * tmp3 tmp29 = tmp1 + tmp28 tmp30 = tmp0 - tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, 
eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp36 = tl.load(in_out_ptr0 + x0, xmask) tmp37 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = triton_helpers.maximum(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp20 - tmp12 tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp1) tmp24 = tmp15 - tmp21 tmp25 = triton_helpers.maximum(tmp24, tmp1) tmp26 = tmp23 + tmp25 tmp27 = tmp17 - tmp21 tmp28 = triton_helpers.maximum(tmp27, tmp1) tmp29 = tmp26 + tmp28 tmp30 = tmp19 - tmp21 tmp31 = triton_helpers.maximum(tmp30, tmp1) tmp32 = tmp29 + tmp31 tmp33 = tmp32 - tmp12 tmp34 = tmp13 * tmp33 tmp35 = tmp34 >= tmp1 tmp38 = 0.5 tmp39 = tmp37 * tmp38 tmp40 = tmp39 * tmp38 tmp41 = tmp40 * tmp38 tmp42 = tmp41 * tmp38 tmp43 = tmp42 * tmp38 tmp44 = tmp43 * tmp38 tmp45 = tmp44 * tmp38 tmp46 = tmp45 * tmp38 tmp47 = tmp46 * tmp38 tmp48 = tmp47 * tmp38 tmp49 = tmp48 * tmp38 tmp50 = tmp49 * tmp38 tmp51 = tmp50 * tmp38 tmp52 = tmp51 * tmp38 tmp53 = tmp52 * tmp38 tmp54 = tmp53 * tmp38 tmp55 = tmp54 * tmp38 tmp56 = tmp55 * tmp38 tmp57 = tmp56 * tmp38 tmp58 = tmp57 * tmp38 tmp59 = tmp58 * tmp38 tmp60 = tmp59 * tmp38 tmp61 = tmp60 * tmp38 tmp62 = tmp61 * tmp38 tmp63 = tmp62 * tmp38 tmp64 = tmp36 + tmp63 tmp65 = tl.where(tmp35, tmp64, tmp36) tl.store(in_out_ptr0 + x0, tmp65, xmask) @triton.jit def triton_poi_fused_add_div_sub_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp3 tmp6 = tmp5 * tmp3 tmp7 = tmp6 * tmp3 tmp8 = tmp7 * tmp3 tmp9 = tmp8 * tmp3 tmp10 = tmp9 * tmp3 tmp11 = tmp10 * tmp3 tmp12 = tmp11 * tmp3 tmp13 = tmp12 * tmp3 tmp14 = tmp13 * tmp3 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp3 tmp17 = tmp16 * tmp3 tmp18 = tmp17 * tmp3 tmp19 = tmp18 * tmp3 tmp20 = tmp19 * tmp3 tmp21 = tmp20 * tmp3 tmp22 = tmp21 * tmp3 tmp23 = tmp22 * tmp3 tmp24 = tmp23 * tmp3 tmp25 = tmp24 * tmp3 tmp26 = tmp25 * tmp3 tmp27 = tmp26 * tmp3 tmp28 = tmp27 * tmp3 tmp29 = tmp28 * tmp3 tmp30 = tmp1 + tmp29 tmp31 = tmp0 - tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr4, out_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') 
tmp14 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp36 = tl.load(in_out_ptr0 + x0, xmask) tmp37 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = triton_helpers.maximum(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp20 - tmp12 tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp1) tmp24 = tmp15 - tmp21 tmp25 = triton_helpers.maximum(tmp24, tmp1) tmp26 = tmp23 + tmp25 tmp27 = tmp17 - tmp21 tmp28 = triton_helpers.maximum(tmp27, tmp1) tmp29 = tmp26 + tmp28 tmp30 = tmp19 - tmp21 tmp31 = triton_helpers.maximum(tmp30, tmp1) tmp32 = tmp29 + tmp31 tmp33 = tmp32 - tmp12 tmp34 = tmp13 * tmp33 tmp35 = tmp34 >= tmp1 tmp38 = 0.5 tmp39 = tmp37 * tmp38 tmp40 = tmp39 * tmp38 tmp41 = tmp40 * tmp38 tmp42 = tmp41 * tmp38 tmp43 = tmp42 * tmp38 tmp44 = tmp43 * tmp38 tmp45 = tmp44 * tmp38 tmp46 = tmp45 * tmp38 tmp47 = tmp46 * tmp38 tmp48 = tmp47 * tmp38 tmp49 = tmp48 * tmp38 tmp50 = tmp49 * tmp38 tmp51 = tmp50 * tmp38 tmp52 = tmp51 * tmp38 tmp53 = tmp52 * tmp38 tmp54 = tmp53 * tmp38 tmp55 = tmp54 * tmp38 tmp56 = tmp55 * tmp38 tmp57 = tmp56 * tmp38 tmp58 = tmp57 * tmp38 tmp59 = tmp58 * tmp38 tmp60 = tmp59 * tmp38 tmp61 = tmp60 * tmp38 tmp62 = tmp61 * tmp38 tmp63 = tmp62 * tmp38 tmp64 = tmp63 * tmp38 tmp65 = tmp36 + tmp64 tmp66 = tl.where(tmp35, tmp65, tmp36) tmp67 = tmp64 * tmp38 tmp68 = tmp66 + tmp67 tmp69 = tmp14 - tmp68 tmp70 = triton_helpers.maximum(tmp69, tmp1) tmp71 = tmp15 - tmp68 tmp72 = triton_helpers.maximum(tmp71, tmp1) tmp73 = tmp70 + tmp72 tmp74 = tmp17 - tmp68 tmp75 = triton_helpers.maximum(tmp74, tmp1) tmp76 = tmp73 + tmp75 tmp77 = tmp19 - tmp68 tmp78 = triton_helpers.maximum(tmp77, tmp1) tmp79 = tmp76 + tmp78 tmp80 = tmp79 - tmp12 tmp81 = tmp80 * tmp33 tmp82 = tmp81 >= tmp1 tmp83 = tl.where(tmp82, tmp68, tmp66) tmp84 = tmp67 * tmp38 tmp85 = tmp83 + tmp84 tmp86 = tmp14 - tmp85 tmp87 = triton_helpers.maximum(tmp86, tmp1) tmp88 = tmp15 - tmp85 tmp89 = triton_helpers.maximum(tmp88, tmp1) tmp90 = tmp87 + tmp89 tmp91 = tmp17 - tmp85 tmp92 = triton_helpers.maximum(tmp91, tmp1) tmp93 = tmp90 + tmp92 tmp94 = tmp19 - tmp85 tmp95 = triton_helpers.maximum(tmp94, tmp1) tmp96 = tmp93 + tmp95 tmp97 = tmp96 - tmp12 tmp98 = tmp97 * tmp33 tmp99 = tmp98 >= tmp1 tmp100 = tl.where(tmp99, tmp85, tmp83) tmp101 = tmp84 * tmp38 tmp102 = tmp100 + tmp101 tmp103 = tmp14 - tmp102 tmp104 = triton_helpers.maximum(tmp103, tmp1) tmp105 = tmp15 - tmp102 tmp106 = triton_helpers.maximum(tmp105, tmp1) tmp107 = tmp104 + tmp106 tmp108 = tmp17 - tmp102 tmp109 = triton_helpers.maximum(tmp108, tmp1) tmp110 = tmp107 + tmp109 tmp111 = tmp19 - tmp102 tmp112 = triton_helpers.maximum(tmp111, tmp1) tmp113 = tmp110 + tmp112 tmp114 = tmp113 - tmp12 tmp115 = tmp114 * tmp33 tmp116 = tmp115 >= tmp1 tmp117 = tl.where(tmp116, tmp102, tmp100) tmp118 = tmp101 * tmp38 tmp119 = tmp117 + tmp118 tmp120 = tmp14 - tmp119 tmp121 = triton_helpers.maximum(tmp120, tmp1) tmp122 = tmp15 - tmp119 tmp123 = triton_helpers.maximum(tmp122, tmp1) tmp124 = tmp121 + tmp123 tmp125 = 
tmp17 - tmp119 tmp126 = triton_helpers.maximum(tmp125, tmp1) tmp127 = tmp124 + tmp126 tmp128 = tmp19 - tmp119 tmp129 = triton_helpers.maximum(tmp128, tmp1) tmp130 = tmp127 + tmp129 tmp131 = tmp130 - tmp12 tmp132 = tmp131 * tmp33 tmp133 = tmp132 >= tmp1 tmp134 = tl.where(tmp133, tmp119, tmp117) tmp135 = tmp118 * tmp38 tmp136 = tmp134 + tmp135 tmp137 = tmp14 - tmp136 tmp138 = triton_helpers.maximum(tmp137, tmp1) tmp139 = tmp15 - tmp136 tmp140 = triton_helpers.maximum(tmp139, tmp1) tmp141 = tmp138 + tmp140 tmp142 = tmp17 - tmp136 tmp143 = triton_helpers.maximum(tmp142, tmp1) tmp144 = tmp141 + tmp143 tmp145 = tmp19 - tmp136 tmp146 = triton_helpers.maximum(tmp145, tmp1) tmp147 = tmp144 + tmp146 tmp148 = tmp147 - tmp12 tmp149 = tmp148 * tmp33 tmp150 = tmp149 >= tmp1 tmp151 = tl.where(tmp150, tmp136, tmp134) tmp152 = tmp135 * tmp38 tmp153 = tmp151 + tmp152 tmp154 = tmp14 - tmp153 tmp155 = triton_helpers.maximum(tmp154, tmp1) tmp156 = tmp15 - tmp153 tmp157 = triton_helpers.maximum(tmp156, tmp1) tmp158 = tmp155 + tmp157 tmp159 = tmp17 - tmp153 tmp160 = triton_helpers.maximum(tmp159, tmp1) tmp161 = tmp158 + tmp160 tmp162 = tmp19 - tmp153 tmp163 = triton_helpers.maximum(tmp162, tmp1) tmp164 = tmp161 + tmp163 tl.store(out_ptr4 + x0, tmp101, xmask) tl.store(in_out_ptr2 + x0, tmp151, xmask) tl.store(out_ptr7 + x0, tmp164, xmask) @triton.jit def triton_poi_fused_add_clamp_div_sub_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp3 tmp6 = tmp5 * tmp3 tmp7 = tmp1 + tmp6 tmp8 = tmp0 - tmp7 tmp9 = 0.0 tmp10 = triton_helpers.maximum(tmp8, tmp9) tmp12 = tmp10 / tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf41 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf40 get_raw_stream(0) triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0[grid(64)](buf41, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf82 = reinterpret_tensor(buf81, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf81 triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1[grid(64)](buf82, arg0_1, buf41, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) buf83 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_clamp_div_sub_2[grid(256)](arg0_1, buf82, buf42, buf83, 256, XBLOCK=256, num_warps=4, num_stages=1) buf85 = buf82 del buf82 triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3[grid(64)](buf85, buf83, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) buf86 = buf83 del buf83 triton_poi_fused_add_clamp_div_sub_4[grid(256)](arg0_1, buf85, buf42, buf86, 256, XBLOCK=256, num_warps=4, num_stages=1) buf88 = buf85 del buf85 triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5[grid(64)](buf88, buf86, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) buf89 = buf86 del buf86 
triton_poi_fused_add_div_sub_6[grid(256)](arg0_1, buf88, buf42, buf89, 256, XBLOCK=256, num_warps=4, num_stages=1) buf91 = buf88 del buf88 triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7[grid(64)](buf91, buf89, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) buf92 = buf89 del buf89 triton_poi_fused_add_div_sub_8[grid(256)](arg0_1, buf91, buf42, buf92, 256, XBLOCK=128, num_warps=4, num_stages=1) buf94 = buf91 del buf91 buf100 = buf41 del buf41 buf103 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf104 = reinterpret_tensor(buf103, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf103 buf105 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9[grid(64)](buf94, buf104, buf92, arg0_1, buf42, buf100, buf105, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf42 del buf94 buf106 = buf92 del buf92 triton_poi_fused_add_clamp_div_sub_10[grid(256)](arg0_1, buf104, buf100, buf105, buf106, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf100 del buf104 del buf105 return buf106, def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True): """sparsemax: normalizing sparse transform (a la softmax), via bisection. Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. Parameters ---------- X : torch.Tensor The input tensor. dim : int The dimension along which to apply sparsemax. n_iter : int Number of bisection iterations. For float32, 24 iterations should suffice for machine precision. ensure_sum_one : bool, Whether to divide the result by its sum. If false, the result might sum to close but not exactly 1, which might cause downstream problems. Note: This function does not yet support normalizing along anything except the last dimension. Please use transposing and views to achieve more general behavior. Returns ------- P : torch tensor, same shape as X The projection result, such that P.sum(dim=dim) == 1 elementwise. 
""" return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one) class EntmaxBisectFunction(Function): @classmethod def _gp(cls, x, alpha): return x ** (alpha - 1) @classmethod def _gp_inv(cls, y, alpha): return y ** (1 / (alpha - 1)) @classmethod def _p(cls, X, alpha): return cls._gp_inv(torch.clamp(X, min=0), alpha) @classmethod def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True ): if not isinstance(alpha, torch.Tensor): alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device) alpha_shape = list(X.shape) alpha_shape[dim] = 1 alpha = alpha.expand(*alpha_shape) ctx.alpha = alpha ctx.dim = dim d = X.shape[dim] X = X * (alpha - 1) max_val, _ = X.max(dim=dim, keepdim=True) tau_lo = max_val - cls._gp(1, alpha) tau_hi = max_val - cls._gp(1 / d, alpha) f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1 dm = tau_hi - tau_lo for it in range(n_iter): dm /= 2 tau_m = tau_lo + dm p_m = cls._p(X - tau_m, alpha) f_m = p_m.sum(dim) - 1 mask = (f_m * f_lo >= 0).unsqueeze(dim) tau_lo = torch.where(mask, tau_m, tau_lo) if ensure_sum_one: p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim) ctx.save_for_backward(p_m) return p_m @classmethod def backward(cls, ctx, dY): Y, = ctx.saved_tensors gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1)) dX = dY * gppr q = dX.sum(ctx.dim) / gppr.sum(ctx.dim) q = q.unsqueeze(ctx.dim) dX -= q * gppr d_alpha = None if ctx.needs_input_grad[1]: S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1)) ent = S.sum(ctx.dim).unsqueeze(ctx.dim) Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim) d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2 d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1) d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim) return dX, d_alpha, None, None, None class SparsemaxBisectFunction(EntmaxBisectFunction): @classmethod def _gp(cls, x, alpha): return x @classmethod def _gp_inv(cls, y, alpha): return y @classmethod def _p(cls, x, alpha): return torch.clamp(x, min=0) @classmethod def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True): return super().forward(ctx, X, alpha=2, dim=dim, n_iter=50, ensure_sum_one=True) @classmethod def backward(cls, ctx, dY): Y, = ctx.saved_tensors gppr = Y > 0 dX = dY * gppr q = dX.sum(ctx.dim) / gppr.sum(ctx.dim) q = q.unsqueeze(ctx.dim) dX -= q * gppr return dX, None, None, None class SparsemaxBisectNew(nn.Module): def __init__(self, dim=-1, n_iter=None): """sparsemax: normalizing sparse transform (a la softmax) via bisection Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. Parameters ---------- dim : int The dimension along which to apply sparsemax. n_iter : int Number of bisection iterations. For float32, 24 iterations should suffice for machine precision. """ self.dim = dim self.n_iter = n_iter super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
antoniogois/entmax
SparsemaxBisect
false
14957
[ "MIT" ]
298
7ff3fa6b09ee53e04514173aacae9de90c95ca75
https://github.com/antoniogois/entmax/tree/7ff3fa6b09ee53e04514173aacae9de90c95ca75
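Both halves of this entry implement the same 50-step bisection, so a direct forward comparison on the (4, 4, 4, 4) shape from `get_inputs()` is the natural check. A minimal sketch, assuming a CUDA device and the classes defined in this entry; the tolerance is illustrative:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = SparsemaxBisect()(x)        # eager PyTorch bisection
fused = SparsemaxBisectNew()(x)   # unrolled Triton kernels via call()
print(torch.allclose(ref, fused, atol=1e-5))
print(fused.sum(dim=-1))          # each last-dim slice should sum to ~1

Note that `SparsemaxBisectFunction.forward` hard-codes `n_iter=50`, so the module's `n_iter=None` default never actually reaches the loop.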
FactorScalar
import torch
import torch.nn as nn


class FactorScalar(nn.Module):

    def __init__(self, initial_value=1.0, **kwargs):
        super().__init__()
        self.factor = nn.Parameter(torch.tensor(initial_value))

    def on_task_end(self):
        pass

    def on_epoch_end(self):
        pass

    def forward(self, inputs):
        return self.factor * inputs

    def __mul__(self, other):
        return self.forward(other)

    def __rmul__(self, other):
        return self.forward(other)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (), ())
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
    return buf0, primals_2


class FactorScalarNew(nn.Module):

    def __init__(self, initial_value=1.0, **kwargs):
        super().__init__()
        self.factor = nn.Parameter(torch.tensor(initial_value))

    def on_task_end(self):
        pass

    def on_epoch_end(self):
        pass

    def __mul__(self, other):
        return self.forward(other)

    def __rmul__(self, other):
        return self.forward(other)

    def forward(self, input_0):
        primals_1 = self.factor
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
billpsomas/incremental_learning.pytorch
FactorScalar
false
14958
[ "MIT" ]
277
a401a6609fc61c74698739cf937c0ece1c10913f
https://github.com/billpsomas/incremental_learning.pytorch/tree/a401a6609fc61c74698739cf937c0ece1c10913f
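Since `FactorScalarNew` keeps the `__mul__`/`__rmul__` overloads, the `*` operator and a direct call both route through the fused multiply kernel. A minimal usage sketch, assuming a CUDA device; the eager comparison is illustrative:

import torch

scale = FactorScalarNew(initial_value=2.0).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = scale * x                  # __mul__ -> forward -> call()
ref = scale.factor * x           # eager broadcast multiply
print(torch.allclose(out, ref))  # expected: True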
InvertedFactorScalar
import torch
import torch.nn as nn


class InvertedFactorScalar(nn.Module):

    def __init__(self, initial_value=1.0, **kwargs):
        super().__init__()
        self._factor = nn.Parameter(torch.tensor(initial_value))

    @property
    def factor(self):
        return 1 / (self._factor + 1e-07)

    def on_task_end(self):
        pass

    def on_epoch_end(self):
        pass

    def forward(self, inputs):
        return self.factor * inputs

    def __mul__(self, other):
        return self.forward(other)

    def __rmul__(self, other):
        return self.forward(other)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_reciprocal_0(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp8 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = 1e-07
    tmp3 = tmp1 + tmp2
    tmp4 = tl.full([1], 1, tl.int32)
    tmp5 = tmp4 / tmp3
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp9 = tmp7 * tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (), ())
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_reciprocal_0[grid(256)](primals_1,
            primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf0, primals_1, primals_2


class InvertedFactorScalarNew(nn.Module):

    def __init__(self, initial_value=1.0, **kwargs):
        super().__init__()
        self._factor = nn.Parameter(torch.tensor(initial_value))

    @property
    def factor(self):
        return 1 / (self._factor + 1e-07)

    def on_task_end(self):
        pass

    def on_epoch_end(self):
        pass

    def __mul__(self, other):
        return self.forward(other)

    def __rmul__(self, other):
        return self.forward(other)

    def forward(self, input_0):
        primals_1 = self._factor
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
billpsomas/incremental_learning.pytorch
InvertedFactorScalar
false
14959
[ "MIT" ]
277
a401a6609fc61c74698739cf937c0ece1c10913f
https://github.com/billpsomas/incremental_learning.pytorch/tree/a401a6609fc61c74698739cf937c0ece1c10913f
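Here the kernel folds the `1 / (self._factor + 1e-07)` reciprocal and the broadcast multiply into a single pass, so the output should match the eager `factor` property. A minimal check, assuming a CUDA device:

import torch

mod = InvertedFactorScalarNew(initial_value=2.0).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = mod(x)
ref = mod.factor * x             # property computes 1 / (_factor + 1e-07)
print(torch.allclose(out, ref))  # expected: True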
MinibatchStdLayer
import torch
import torch.nn as nn


class MinibatchStdLayer(nn.Module):

    def __init__(self, group_size=4):
        super().__init__()
        self.group_size = group_size

    def forward(self, x):
        group_size = min(self.group_size, x.shape[0])
        s = x.shape
        y = x.view([group_size, -1, s[1], s[2], s[3]])
        y = y.float()
        y = y - torch.mean(y, dim=0, keepdim=True)
        y = torch.mean(y * y, dim=0)
        y = torch.sqrt(y + 1e-08)
        y = torch.mean(torch.mean(torch.mean(y, axis=3, keepdim=True),
            axis=2, keepdim=True), axis=1, keepdim=True)
        y = y.type(x.type())
        y = y.repeat(group_size, 1, s[2], s[3])
        return torch.cat([x, y], axis=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mean_mul_sqrt_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (64 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (128 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (192 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr0 + (65 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (129 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (193 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp47 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp48 = tl.load(in_ptr0 + (66 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (130 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp52 = tl.load(in_ptr0 + (194 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp70 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp71 = tl.load(in_ptr0 + (67 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp73 = tl.load(in_ptr0 + (131 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp75 = tl.load(in_ptr0 + (195 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp31 = tmp30 / tmp7 tmp32 = tmp24 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp25 - tmp31 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp31 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp29 - tmp31 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp42 / tmp7 tmp44 = tmp43 + tmp21 tmp45 = libdevice.sqrt(tmp44) tmp46 = tmp23 + tmp45 tmp49 = tmp47 + tmp48 tmp51 = tmp49 + tmp50 tmp53 = tmp51 + tmp52 tmp54 = tmp53 / tmp7 tmp55 = tmp47 - tmp54 tmp56 = tmp55 * tmp55 tmp57 = tmp48 - tmp54 tmp58 = tmp57 * tmp57 tmp59 = tmp56 + tmp58 tmp60 = tmp50 - tmp54 tmp61 = tmp60 * tmp60 tmp62 = tmp59 + tmp61 tmp63 = tmp52 - tmp54 tmp64 = tmp63 * tmp63 tmp65 = tmp62 + tmp64 tmp66 = tmp65 / tmp7 tmp67 = tmp66 + tmp21 tmp68 = libdevice.sqrt(tmp67) tmp69 = tmp46 + tmp68 tmp72 = tmp70 + tmp71 tmp74 = tmp72 + tmp73 tmp76 = tmp74 + tmp75 tmp77 = tmp76 / tmp7 tmp78 = tmp70 - tmp77 tmp79 = tmp78 * tmp78 tmp80 = tmp71 - tmp77 tmp81 = tmp80 * tmp80 tmp82 = tmp79 + tmp81 tmp83 = tmp73 - tmp77 tmp84 = tmp83 * tmp83 tmp85 = tmp82 + tmp84 tmp86 = tmp75 - tmp77 tmp87 = tmp86 * tmp86 tmp88 = tmp85 + tmp87 tmp89 = tmp88 / tmp7 
tmp90 = tmp89 + tmp21 tmp91 = libdevice.sqrt(tmp90) tmp92 = tmp69 + tmp91 tmp93 = tmp92 / tmp7 tl.store(out_ptr0 + x0, tmp93, xmask) @triton.jit def triton_per_fused_mean_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_mean_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 4.0 tmp3 = tmp1 / tmp2 tl.store(out_ptr0 + (x0 + 80 * x1), tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 1), (16, 4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_sqrt_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_per_fused_mean_1[grid(1)](buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 buf4 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_2[grid(256)](arg0_1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf3 = reinterpret_tensor(buf4, (4, 1, 4, 4), (80, 16, 4, 1), 64) triton_poi_fused_mean_repeat_3[grid(64)](buf1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 return buf4, class MinibatchStdLayerNew(nn.Module): def __init__(self, group_size=4): super().__init__() self.group_size = group_size def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bg459/gan-ensembling-loader
MinibatchStdLayer
false
14960
[ "MIT" ]
86
5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8
https://github.com/bg459/gan-ensembling-loader/tree/5ff6fae5fd5ced0a48ef2cd3dcb1d74aa1dadce8
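A quick shape sketch for the MinibatchStdLayer record above (shapes assumed from get_inputs()): the layer appends one statistics channel, and with batch size equal to group_size that channel holds a single scalar broadcast over every position.

import torch

layer = MinibatchStdLayer(group_size=4)
x = torch.rand(4, 4, 4, 4)
y = layer(x)
assert y.shape == (4, 5, 4, 4)  # one extra std-statistics channel
# one group of 4 -> the appended channel carries the same scalar everywhere
assert torch.allclose(y[:, 4], y[0, 4, 0, 0].expand(4, 4, 4))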
LinearModel
import torch import torch.nn as nn class LinearModel(nn.Module): """Linear model applying on the logits alpha * x + beta. By default, this model is initialized as an identity operation. See https://arxiv.org/abs/1905.13260 for an example usage. :param alpha: A learned scalar. :param beta: A learned scalar. """ def __init__(self, alpha=1.0, beta=0.0): super().__init__() self.alpha = nn.Parameter(torch.tensor(alpha)) self.beta = nn.Parameter(torch.tensor(beta)) def forward(self, inputs): return self.alpha * inputs + self.beta def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp3 = tmp1 * tmp2 tmp6 = tmp3 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class LinearModelNew(nn.Module): """Linear model applying on the logits alpha * x + beta. By default, this model is initialized as an identity operation. See https://arxiv.org/abs/1905.13260 for an example usage. :param alpha: A learned scalar. :param beta: A learned scalar. """ def __init__(self, alpha=1.0, beta=0.0): super().__init__() self.alpha = nn.Parameter(torch.tensor(alpha)) self.beta = nn.Parameter(torch.tensor(beta)) def forward(self, input_0): primals_1 = self.alpha primals_3 = self.beta primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
billpsomas/incremental_learning.pytorch
LinearModel
false
14961
[ "MIT" ]
277
a401a6609fc61c74698739cf937c0ece1c10913f
https://github.com/billpsomas/incremental_learning.pytorch/tree/a401a6609fc61c74698739cf937c0ece1c10913f
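A minimal sketch for the LinearModel record above: with the default alpha=1.0, beta=0.0 it starts as the identity map, and both learned scalars receive gradients.

import torch

m = LinearModel()
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(m(x), x)  # identity at initialization
m(x).sum().backward()
assert m.alpha.grad is not None and m.beta.grad is not None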
WeightedL1Loss
import torch import numpy as np import torch.nn as nn class WeightedL1Loss(nn.Module): def __init__(self, code_weights: 'list'=None): """ Args: code_weights: (#codes) float list if not None. Code-wise weights. """ super(WeightedL1Loss, self).__init__() self.code_weights = code_weights if code_weights is not None: self.code_weights = np.array(code_weights, dtype=np.float32) self.code_weights = torch.from_numpy(self.code_weights) def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', weights: 'torch.Tensor'=None): """ Args: input: (B, #anchors, #codes) float tensor. Ecoded predicted locations of objects. target: (B, #anchors, #codes) float tensor. Regression targets. weights: (B, #anchors) float tensor if not None. Returns: loss: (B, #anchors) float tensor. Weighted smooth l1 loss without reduction. """ target = torch.where(torch.isnan(target), input, target) diff = input - target if self.code_weights is not None: diff = diff * self.code_weights.view(1, 1, -1) loss = torch.abs(diff) if weights is not None: assert weights.shape[0] == loss.shape[0] and weights.shape[1 ] == loss.shape[1] loss = loss * weights.unsqueeze(-1) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_isnan_sub_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.isnan(tmp1).to(tl.int1) tmp3 = tl.where(tmp2, tmp0, tmp1) tmp4 = tmp0 - tmp3 tmp5 = tl_math.abs(tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_isnan_sub_where_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class WeightedL1LossNew(nn.Module): def __init__(self, code_weights: 'list'=None): """ Args: code_weights: (#codes) float list if not None. Code-wise weights. """ super(WeightedL1LossNew, self).__init__() self.code_weights = code_weights if code_weights is not None: self.code_weights = np.array(code_weights, dtype=np.float32) self.code_weights = torch.from_numpy(self.code_weights) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
blakechen97/SASA
WeightedL1Loss
false
14962
[ "Apache-2.0" ]
46
cd79f60e923242590b64cb0cc70203a524e7e9a7
https://github.com/blakechen97/SASA/tree/cd79f60e923242590b64cb0cc70203a524e7e9a7
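A small behavioral sketch for the WeightedL1Loss record above (hypothetical shapes): NaN entries in the target are replaced by the prediction, so they contribute zero loss, and the result stays unreduced.

import torch

loss_fn = WeightedL1Loss()
pred = torch.rand(2, 3, 4)        # (B, #anchors, #codes), assumed shape
target = pred.clone()
target[0, 0, 0] = float('nan')    # NaN target -> copied from pred
loss = loss_fn(pred, target)
assert loss.shape == (2, 3, 4)    # elementwise, no reduction
assert torch.all(loss == 0)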
TransposeLayer
import torch class TransposeLayer(torch.nn.Module): """Transpose the input.""" def forward(self, data): return data.t().contiguous() def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4, 4)](arg0_1, buf0, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del arg0_1 return buf0, class TransposeLayerNew(torch.nn.Module): """Transpose the input.""" def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bolajiy/beer
TransposeLayer
false
14963
[ "MIT" ]
46
6fe968c7ca4864437890aa6bd705755c2580696e
https://github.com/bolajiy/beer/tree/6fe968c7ca4864437890aa6bd705755c2580696e
WeightedBinaryCrossEntropyLoss
import torch import torch.nn as nn import torch.nn.functional as F class WeightedBinaryCrossEntropyLoss(nn.Module): """ Transform input to fit the format of PyTorch official cross entropy loss with anchor-wise weighting. """ def __init__(self): super(WeightedBinaryCrossEntropyLoss, self).__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', weights: 'torch.Tensor'): """ Args: input: (B, #anchors, #classes) float tensor. Predicted logits for each class. target: (B, #anchors, #classes) float tensor. One-hot classification targets. weights: (B, #anchors) float tensor. Anchor-wise weights. Returns: loss: (B, #anchors) float tensor. Weighted cross entropy loss without reduction. """ loss = F.binary_cross_entropy_with_logits(input, target, reduction= 'none').mean(dim=-1) * weights return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_binary_cross_entropy_with_logits_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp39 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp14 = tmp1 - tmp13 tmp16 = tmp14 * tmp15 tmp17 = triton_helpers.minimum(tmp5, tmp15) tmp18 = tl_math.abs(tmp15) tmp19 = -tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = libdevice.log1p(tmp20) tmp22 = tmp17 - tmp21 tmp23 = tmp16 - tmp22 tmp24 = tmp12 + tmp23 tmp26 = tmp1 - tmp25 tmp28 = tmp26 * tmp27 tmp29 = triton_helpers.minimum(tmp5, tmp27) tmp30 = tl_math.abs(tmp27) tmp31 = -tmp30 tmp32 = tl_math.exp(tmp31) tmp33 = libdevice.log1p(tmp32) tmp34 = tmp29 - tmp33 tmp35 = tmp28 - tmp34 tmp36 = tmp24 + tmp35 tmp38 = tmp1 - tmp37 tmp40 = tmp38 * tmp39 tmp41 = triton_helpers.minimum(tmp5, tmp39) tmp42 = tl_math.abs(tmp39) tmp43 = -tmp42 tmp44 = tl_math.exp(tmp43) tmp45 = libdevice.log1p(tmp44) tmp46 = tmp41 - tmp45 tmp47 = tmp40 - tmp46 tmp48 = tmp36 + tmp47 tmp49 = 4.0 tmp50 = tmp48 / tmp49 tl.store(out_ptr0 + x0, tmp50, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_binary_cross_entropy_with_logits_mean_0[grid(64)]( arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_1[grid(256)](buf0, arg2_1, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg2_1 del buf0 return buf1, class WeightedBinaryCrossEntropyLossNew(nn.Module): """ Transform input to fit the format of PyTorch official cross entropy loss with anchor-wise weighting. """ def __init__(self): super(WeightedBinaryCrossEntropyLossNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
blakechen97/SASA
WeightedBinaryCrossEntropyLoss
false
14964
[ "Apache-2.0" ]
46
cd79f60e923242590b64cb0cc70203a524e7e9a7
https://github.com/blakechen97/SASA/tree/cd79f60e923242590b64cb0cc70203a524e7e9a7
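A reference-equivalence sketch for the WeightedBinaryCrossEntropyLoss record above (shapes are assumptions): the loss is per-class BCE-with-logits averaged over the class dimension, then scaled by anchor weights.

import torch
import torch.nn.functional as F

loss_fn = WeightedBinaryCrossEntropyLoss()
logits = torch.randn(2, 8, 3)   # (B, #anchors, #classes)
target = torch.randint(0, 2, (2, 8, 3)).float()
w = torch.rand(2, 8)
out = loss_fn(logits, target, w)
ref = F.binary_cross_entropy_with_logits(logits, target, reduction='none').mean(-1) * w
assert torch.allclose(out, ref)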
ResidualConvUnit
import torch import torch.nn as nn import torch.fft import torch.utils.cpp_extension import torch.nn class ResidualConvUnit(nn.Module): def __init__(self, cin, activation, bn): super().__init__() self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1, bias=True) self.skip_add = nn.quantized.FloatFunctional() def forward(self, x): return self.skip_add.add(self.conv(x), x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cin': 4, 'activation': 4, 'bn': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.fft import torch.utils.cpp_extension import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class ResidualConvUnitNew(nn.Module): def __init__(self, cin, activation, bn): super().__init__() self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1, bias=True) self.skip_add = nn.quantized.FloatFunctional() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
autonomousvision/stylegan_xl
ResidualConvUnit
false
14965
[ "MIT" ]
214
8c76531bcbf0931c295ecd1d32f75af998d1411f
https://github.com/autonomousvision/stylegan_xl/tree/8c76531bcbf0931c295ecd1d32f75af998d1411f
StandardizedConv2d
import torch import torch.nn as nn import torch.nn.functional as F class StandardizedConv2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(StandardizedConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, x): weight = self.weight weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True ).mean(dim=3, keepdim=True) weight = weight - weight_mean std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1 ) + 1e-05 weight = weight / std.expand_as(weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp28 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp30 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + x2, tmp36, xmask) @triton.jit def triton_per_fused_div_mean_std_sub_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tl.where(xmask, tmp11, 0) tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tl.full([XBLOCK, 1], 64, tl.int32) tmp19 = tmp18.to(tl.float32) tmp20 = tmp17 / tmp19 tmp21 = tmp11 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.sum(tmp25, 1)[:, None] tmp27 = 63.0 tmp28 = tmp26 / tmp27 tmp29 = libdevice.sqrt(tmp28) tmp30 = 1e-05 tmp31 = tmp29 + tmp30 tmp32 = tmp10 / tmp31 tl.store(out_ptr0 + (r1 + 64 * x0), tmp10, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp29, 
xmask) tl.store(out_ptr1 + (r1 + 64 * x0), tmp32, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf5 = buf3 del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused_div_mean_std_sub_1[grid(4)](buf5, primals_1, buf0, buf1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(16)](buf8, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf8, primals_1, primals_3, buf5, buf6 class StandardizedConv2dNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(StandardizedConv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
blazejdolicki/vissl
StandardizedConv2d
false
14966
[ "MIT" ]
2512
9c10748a19fb1c637f32687142c8cd685f2410ff
https://github.com/blazejdolicki/vissl/tree/9c10748a19fb1c637f32687142c8cd685f2410ff
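A sketch of what the StandardizedConv2d record above computes (weight standardization, replicated here from the module's forward): each filter is centered and divided by its unbiased standard deviation before the convolution.

import torch
import torch.nn.functional as F

conv = StandardizedConv2d(4, 4, kernel_size=3, padding=1)
x = torch.rand(2, 4, 8, 8)
w = conv.weight - conv.weight.mean(dim=(1, 2, 3), keepdim=True)  # per-filter centering
w = w / (w.view(4, -1).std(dim=1).view(-1, 1, 1, 1) + 1e-05)     # per-filter std
ref = F.conv2d(x, w, conv.bias, padding=1)
assert torch.allclose(conv(x), ref, atol=1e-06)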
AdaptiveInstanceNorm
import torch import torch.nn as nn from math import sqrt def equal_lr(module, name='weight'): EqualLR.apply(module, name) return module class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() linear = nn.Linear(in_dim, out_dim) linear.weight.data.normal_() linear.bias.data.zero_() self.linear = equal_lr(linear) def forward(self, input): return self.linear(input) class AdaptiveInstanceNorm(nn.Module): def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.style = EqualLinear(style_dim, in_channel * 2) self.style.linear.bias.data[:in_channel] = 1 self.style.linear.bias.data[in_channel:] = 0 def forward(self, input, style): style = self.style(style).unsqueeze(2).unsqueeze(3) gamma, beta = style.chunk(2, 1) out = self.norm(input) out = gamma * out + beta return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from math import sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.7071067811865476 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp24 = tmp22 + tmp23 tmp25 = tmp0 - tmp10 tmp26 = tmp25 * tmp21 tmp27 = tmp24 * tmp26 tmp30 = tmp28 + tmp29 tmp31 = tmp27 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4 ), 0), out=buf1) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__native_batch_norm_legit_add_mul_1[grid(16)](buf5, primals_4, buf1, primals_2, buf2, buf6, 16, 
16, XBLOCK=8, num_warps=2, num_stages=1) del buf1 del primals_2 return buf6, buf0, primals_3, primals_4, buf2, buf5 def equal_lr(module, name='weight'): EqualLR.apply(module, name) return module class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() linear = nn.Linear(in_dim, out_dim) linear.weight.data.normal_() linear.bias.data.zero_() self.linear = equal_lr(linear) def forward(self, input): return self.linear(input) class AdaptiveInstanceNormNew(nn.Module): def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.style = EqualLinear(style_dim, in_channel * 2) self.style.linear.bias.data[:in_channel] = 1 self.style.linear.bias.data[in_channel:] = 0 def forward(self, input_0, input_1): primals_2 = self.style.linear.bias primals_1 = self.style.linear.weight_orig primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
blandocs/Tag2Pix
AdaptiveInstanceNorm
false
14967
[ "MIT" ]
232
733d729067608dbe2c1122c9128f2f38bc0a8edd
https://github.com/blandocs/Tag2Pix/tree/733d729067608dbe2c1122c9128f2f38bc0a8edd
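A usage sketch for the AdaptiveInstanceNorm record above (content/style shapes assumed from get_inputs()): the style vector is mapped to per-channel gamma/beta, and gradients flow back to the equal-lr weight stored as weight_orig.

import torch

adain = AdaptiveInstanceNorm(in_channel=4, style_dim=4)
content = torch.rand(4, 4, 4, 4)
style = torch.rand(4, 4)
out = adain(content, style)
assert out.shape == content.shape
out.sum().backward()
assert adain.style.linear.weight_orig.grad is not None  # equal_lr keeps the raw weight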
LabelSmoothingBCE
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed class LabelSmoothingBCE(nn.Module): def __init__(self, smoothing=0.0): super(LabelSmoothingBCE, self).__init__() self.criterion = nn.BCEWithLogitsLoss(reduction='none') self.confidence = 1.0 - smoothing self.smoothing = smoothing def forward(self, x, target): smooth_target = target.clone().masked_fill(target == 1, self.confidence ) smooth_target = smooth_target.masked_fill(target == 0, self.smoothing) return self.criterion(x, smooth_target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = tmp0 == tmp3 tmp5 = tl.where(tmp4, tmp3, tmp0) tmp6 = tl.where(tmp2, tmp1, tmp5) tmp7 = tmp3 - tmp6 tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.minimum(tmp1, tmp8) tmp11 = tl_math.abs(tmp8) tmp12 = -tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = libdevice.log1p(tmp13) tmp15 = tmp10 - tmp14 tmp16 = tmp9 - tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0[grid (256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class LabelSmoothingBCENew(nn.Module): def __init__(self, smoothing=0.0): super(LabelSmoothingBCENew, self).__init__() self.criterion = nn.BCEWithLogitsLoss(reduction='none') self.confidence = 1.0 - smoothing self.smoothing = smoothing def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
boldsort/craftassist
LabelSmoothingBCE
false
14968
[ "MIT" ]
626
8058d115a250e30deb60d969b7b1a5fefd6e974c
https://github.com/boldsort/craftassist/tree/8058d115a250e30deb60d969b7b1a5fefd6e974c
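An equivalence sketch for the LabelSmoothingBCE record above: hard 0/1 targets become smoothing and 1 - smoothing before the unreduced BCE-with-logits.

import torch
import torch.nn.functional as F

crit = LabelSmoothingBCE(smoothing=0.1)
x = torch.randn(4, 4)
target = torch.randint(0, 2, (4, 4)).float()
smooth = target * 0.9 + (1 - target) * 0.1   # 1 -> 0.9, 0 -> 0.1
ref = F.binary_cross_entropy_with_logits(x, smooth, reduction='none')
assert torch.allclose(crit(x, target), ref)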
NormalIsotropicCovarianceLayer
import abc import math import torch class ProbabilisticLayer(torch.nn.Module, metaclass=abc.ABCMeta): """Probabilistic layer to be used by the encoder/decoder of a Variational AutoEncoder. """ @abc.abstractmethod def forward(self, inputs): """Compute the parameters of the distribution conditioned on the input. Args: inputs (``torch.Tensor[N,dim]``): Conditional inputs. Returns: params (object): Parameters of the distribution. """ pass @abc.abstractmethod def samples_and_llh(self, params, use_mean=False): """Samples using the reparameterization trick so that the each sample can be backpropagated trough. In addition, returns the log-likelihood of the samples given the sampling distribution. Args: params (object): Parameters of the sampling distribution. use_mean (boolean): If true, by pass the sampling and just return the mean value of the distribution. Returns: samples (``torch.Tensor``): Sampled values. llh (``torch.Tensor``): Log-likelihood for each sample. """ pass @abc.abstractmethod def log_likelihood(self, data, params): """Log-likelihood of the data. Args: data (``torch.Tensor[N,dim]``): Data. params (object): Parameters of the distribution. """ pass class NormalDiagonalCovarianceLayer(ProbabilisticLayer): """Normal distribution with diagonal covariance matrix layer.""" def __init__(self, dim_in, dim_out, variance_nonlinearity=None): super().__init__() self.mean = torch.nn.Linear(dim_in, dim_out) self.logvar = torch.nn.Linear(dim_in, dim_out) if variance_nonlinearity is None: variance_nonlinearity = torch.nn.Softplus() self.variance_nonlinearity = variance_nonlinearity def forward(self, inputs): return self.mean(inputs), self.variance_nonlinearity(self.logvar( inputs)) def samples_and_llh(self, params, use_mean=False): means, variances = params if use_mean: samples = means else: dtype, device = means.dtype, means.device noise = torch.randn(*means.shape, dtype=dtype, device=device) std_dev = variances.sqrt() samples = means + std_dev * noise llhs = self.log_likelihood(samples, params) return samples, llhs def log_likelihood(self, data, params): means, variances = params dim = means.shape[-1] delta = torch.sum((data - means).pow(2) / variances, dim=-1) return -0.5 * (variances.log().sum(dim=-1) + delta + dim * math.log (2 * math.pi)) class NormalIsotropicCovarianceLayer(NormalDiagonalCovarianceLayer): """Normal distribution with isotropic covariance matrix layer.""" def __init__(self, dim_in, dim_out, variance_nonlinearity=None): super().__init__(dim_in, dim_out) self.mean = torch.nn.Linear(dim_in, dim_out) self.logvar = torch.nn.Linear(dim_in, 1) if variance_nonlinearity is None: variance_nonlinearity = torch.nn.Softplus() self.variance_nonlinearity = variance_nonlinearity def forward(self, inputs): means = self.mean(inputs) padding = torch.ones_like(means) variances = self.variance_nonlinearity(self.logvar(inputs)) * padding return means, variances def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import abc import math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_ones_like_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = 20.0 tmp4 = tmp2 > tmp3 tmp5 = tl_math.exp(tmp2) tmp6 = libdevice.log1p(tmp5) tmp7 = tmp6 * tmp1 tmp8 = tl.where(tmp4, tmp0, tmp7) tmp9 = tmp8 * tmp1 tl.store(out_ptr0 + x2, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_ones_like_softplus_0[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class ProbabilisticLayer(torch.nn.Module, metaclass=abc.ABCMeta): """Probabilistic layer to be used by the encoder/decoder of a Variational AutoEncoder. """ @abc.abstractmethod def forward(self, inputs): """Compute the parameters of the distribution conditioned on the input. Args: inputs (``torch.Tensor[N,dim]``): Conditional inputs. Returns: params (object): Parameters of the distribution. """ pass @abc.abstractmethod def samples_and_llh(self, params, use_mean=False): """Samples using the reparameterization trick so that the each sample can be backpropagated trough. In addition, returns the log-likelihood of the samples given the sampling distribution. Args: params (object): Parameters of the sampling distribution. use_mean (boolean): If true, by pass the sampling and just return the mean value of the distribution. Returns: samples (``torch.Tensor``): Sampled values. llh (``torch.Tensor``): Log-likelihood for each sample. """ pass @abc.abstractmethod def log_likelihood(self, data, params): """Log-likelihood of the data. Args: data (``torch.Tensor[N,dim]``): Data. params (object): Parameters of the distribution. 
""" pass class NormalDiagonalCovarianceLayer(ProbabilisticLayer): """Normal distribution with diagonal covariance matrix layer.""" def __init__(self, dim_in, dim_out, variance_nonlinearity=None): super().__init__() self.mean = torch.nn.Linear(dim_in, dim_out) self.logvar = torch.nn.Linear(dim_in, dim_out) if variance_nonlinearity is None: variance_nonlinearity = torch.nn.Softplus() self.variance_nonlinearity = variance_nonlinearity def forward(self, inputs): return self.mean(inputs), self.variance_nonlinearity(self.logvar( inputs)) def samples_and_llh(self, params, use_mean=False): means, variances = params if use_mean: samples = means else: dtype, device = means.dtype, means.device noise = torch.randn(*means.shape, dtype=dtype, device=device) std_dev = variances.sqrt() samples = means + std_dev * noise llhs = self.log_likelihood(samples, params) return samples, llhs def log_likelihood(self, data, params): means, variances = params dim = means.shape[-1] delta = torch.sum((data - means).pow(2) / variances, dim=-1) return -0.5 * (variances.log().sum(dim=-1) + delta + dim * math.log (2 * math.pi)) class NormalIsotropicCovarianceLayerNew(NormalDiagonalCovarianceLayer): """Normal distribution with isotropic covariance matrix layer.""" def __init__(self, dim_in, dim_out, variance_nonlinearity=None): super().__init__(dim_in, dim_out) self.mean = torch.nn.Linear(dim_in, dim_out) self.logvar = torch.nn.Linear(dim_in, 1) if variance_nonlinearity is None: variance_nonlinearity = torch.nn.Softplus() self.variance_nonlinearity = variance_nonlinearity def forward(self, input_0): primals_1 = self.mean.weight primals_2 = self.mean.bias primals_4 = self.logvar.weight primals_5 = self.logvar.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
bolajiy/beer
NormalIsotropicCovarianceLayer
false
14969
[ "MIT" ]
46
6fe968c7ca4864437890aa6bd705755c2580696e
https://github.com/bolajiy/beer/tree/6fe968c7ca4864437890aa6bd705755c2580696e
NormalDiagonalCovarianceLayer
import abc import math import torch class ProbabilisticLayer(torch.nn.Module, metaclass=abc.ABCMeta): """Probabilistic layer to be used by the encoder/decoder of a Variational AutoEncoder. """ @abc.abstractmethod def forward(self, inputs): """Compute the parameters of the distribution conditioned on the input. Args: inputs (``torch.Tensor[N,dim]``): Conditional inputs. Returns: params (object): Parameters of the distribution. """ pass @abc.abstractmethod def samples_and_llh(self, params, use_mean=False): """Samples using the reparameterization trick so that the each sample can be backpropagated trough. In addition, returns the log-likelihood of the samples given the sampling distribution. Args: params (object): Parameters of the sampling distribution. use_mean (boolean): If true, by pass the sampling and just return the mean value of the distribution. Returns: samples (``torch.Tensor``): Sampled values. llh (``torch.Tensor``): Log-likelihood for each sample. """ pass @abc.abstractmethod def log_likelihood(self, data, params): """Log-likelihood of the data. Args: data (``torch.Tensor[N,dim]``): Data. params (object): Parameters of the distribution. """ pass class NormalDiagonalCovarianceLayer(ProbabilisticLayer): """Normal distribution with diagonal covariance matrix layer.""" def __init__(self, dim_in, dim_out, variance_nonlinearity=None): super().__init__() self.mean = torch.nn.Linear(dim_in, dim_out) self.logvar = torch.nn.Linear(dim_in, dim_out) if variance_nonlinearity is None: variance_nonlinearity = torch.nn.Softplus() self.variance_nonlinearity = variance_nonlinearity def forward(self, inputs): return self.mean(inputs), self.variance_nonlinearity(self.logvar( inputs)) def samples_and_llh(self, params, use_mean=False): means, variances = params if use_mean: samples = means else: dtype, device = means.dtype, means.device noise = torch.randn(*means.shape, dtype=dtype, device=device) std_dev = variances.sqrt() samples = means + std_dev * noise llhs = self.log_likelihood(samples, params) return samples, llhs def log_likelihood(self, data, params): means, variances = params dim = means.shape[-1] delta = torch.sum((data - means).pow(2) / variances, dim=-1) return -0.5 * (variances.log().sum(dim=-1) + delta + dim * math.log (2 * math.pi)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import abc import math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = 20.0 tmp4 = tmp2 > tmp3 tmp5 = tl_math.exp(tmp2) tmp6 = libdevice.log1p(tmp5) tmp7 = tmp6 * tmp1 tmp8 = tl.where(tmp4, tmp0, tmp7) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_softplus_0[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1 class ProbabilisticLayer(torch.nn.Module, metaclass=abc.ABCMeta): """Probabilistic layer to be used by the encoder/decoder of a Variational AutoEncoder. """ @abc.abstractmethod def forward(self, inputs): """Compute the parameters of the distribution conditioned on the input. Args: inputs (``torch.Tensor[N,dim]``): Conditional inputs. Returns: params (object): Parameters of the distribution. """ pass @abc.abstractmethod def samples_and_llh(self, params, use_mean=False): """Samples using the reparameterization trick so that the each sample can be backpropagated trough. In addition, returns the log-likelihood of the samples given the sampling distribution. Args: params (object): Parameters of the sampling distribution. use_mean (boolean): If true, by pass the sampling and just return the mean value of the distribution. Returns: samples (``torch.Tensor``): Sampled values. llh (``torch.Tensor``): Log-likelihood for each sample. """ pass @abc.abstractmethod def log_likelihood(self, data, params): """Log-likelihood of the data. Args: data (``torch.Tensor[N,dim]``): Data. params (object): Parameters of the distribution. 
""" pass class NormalDiagonalCovarianceLayerNew(ProbabilisticLayer): """Normal distribution with diagonal covariance matrix layer.""" def __init__(self, dim_in, dim_out, variance_nonlinearity=None): super().__init__() self.mean = torch.nn.Linear(dim_in, dim_out) self.logvar = torch.nn.Linear(dim_in, dim_out) if variance_nonlinearity is None: variance_nonlinearity = torch.nn.Softplus() self.variance_nonlinearity = variance_nonlinearity def samples_and_llh(self, params, use_mean=False): means, variances = params if use_mean: samples = means else: dtype, device = means.dtype, means.device noise = torch.randn(*means.shape, dtype=dtype, device=device) std_dev = variances.sqrt() samples = means + std_dev * noise llhs = self.log_likelihood(samples, params) return samples, llhs def log_likelihood(self, data, params): means, variances = params dim = means.shape[-1] delta = torch.sum((data - means).pow(2) / variances, dim=-1) return -0.5 * (variances.log().sum(dim=-1) + delta + dim * math.log (2 * math.pi)) def forward(self, input_0): primals_1 = self.mean.weight primals_2 = self.mean.bias primals_4 = self.logvar.weight primals_5 = self.logvar.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
bolajiy/beer
NormalDiagonalCovarianceLayer
false
14970
[ "MIT" ]
46
6fe968c7ca4864437890aa6bd705755c2580696e
https://github.com/bolajiy/beer/tree/6fe968c7ca4864437890aa6bd705755c2580696e
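A usage sketch for the NormalDiagonalCovarianceLayer record above (batch size is an assumption): forward yields (means, variances), and samples_and_llh draws reparameterized samples along with per-sample log-likelihoods.

import torch

layer = NormalDiagonalCovarianceLayer(dim_in=4, dim_out=4)
h = torch.rand(8, 4)
means, variances = layer(h)
assert torch.all(variances > 0)  # Softplus keeps variances positive
samples, llhs = layer.samples_and_llh((means, variances))
assert samples.shape == (8, 4) and llhs.shape == (8,)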
SkipLastTargetChannelWrapper
import torch import torch.nn as nn from torch.nn import MSELoss class SkipLastTargetChannelWrapper(nn.Module): """ Loss wrapper which removes additional target channel """ def __init__(self, loss, squeeze_channel=False): super(SkipLastTargetChannelWrapper, self).__init__() self.loss = loss self.squeeze_channel = squeeze_channel def forward(self, input, target): assert target.size(1 ) > 1, 'Target tensor has a singleton channel dimension, cannot remove channel' target = target[:, :-1, ...] if self.squeeze_channel: target = torch.squeeze(target, dim=1) return self.loss(input, target) def get_inputs(): return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'loss': MSELoss()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex r0 = rindex % 48 r1 = rindex // 48 tmp0 = tl.load(in_ptr0 + r2, rmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = 192.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 3, 4, 4), (48, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SkipLastTargetChannelWrapperNew(nn.Module): """ Loss wrapper which removes additional target channel """ def __init__(self, loss, squeeze_channel=False): super(SkipLastTargetChannelWrapperNew, self).__init__() self.loss = loss self.squeeze_channel = squeeze_channel def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bounesh/pytorch-3dunet
SkipLastTargetChannelWrapper
false
14,971
[ "MIT" ]
1,236
60278d01eaacc69feee731979826a0c26e223427
https://github.com/bounesh/pytorch-3dunet/tree/60278d01eaacc69feee731979826a0c26e223427
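A minimal smoke test for this record, assuming a CUDA device and that both class definitions above have been executed in the same session; the shapes follow get_inputs():

import torch
from torch.nn import MSELoss

pred = torch.rand(4, 3, 4, 4, device='cuda')    # prediction without the extra channel
target = torch.rand(4, 4, 4, 4, device='cuda')  # the wrapper drops the 4th target channel
eager = SkipLastTargetChannelWrapper(MSELoss())
fused = SkipLastTargetChannelWrapperNew(MSELoss())
# The fused kernel reads only the first 3 channels of the target, so both agree.
torch.testing.assert_close(eager(pred, target), fused(pred, target))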
WeightedSmoothL1Loss
import torch import torch.nn as nn class WeightedSmoothL1Loss(nn.SmoothL1Loss): def __init__(self, threshold, initial_weight, apply_below_threshold=True): super().__init__(reduction='none') self.threshold = threshold self.apply_below_threshold = apply_below_threshold self.weight = initial_weight def forward(self, input, target): l1 = super().forward(input, target) if self.apply_below_threshold: mask = target < self.threshold else: mask = target >= self.threshold l1[mask] = l1[mask] * self.weight return l1.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'threshold': 4, 'initial_weight': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_lt_smooth_l1_loss_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = 4.0 tmp13 = tmp1 < tmp12 tl.store(out_ptr0 + x0, tmp11, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_lt_smooth_l1_loss_0[grid(256)](arg1_1, arg0_1, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1 class WeightedSmoothL1LossNew(nn.SmoothL1Loss): def __init__(self, threshold, initial_weight, apply_below_threshold=True): super().__init__(reduction='none') self.threshold = threshold self.apply_below_threshold = apply_below_threshold self.weight = initial_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bounesh/pytorch-3dunet
WeightedSmoothL1Loss
false
14,972
[ "MIT" ]
1,236
60278d01eaacc69feee731979826a0c26e223427
https://github.com/bounesh/pytorch-3dunet/tree/60278d01eaacc69feee731979826a0c26e223427
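Note that call() in this record returns buf0, the per-element smooth-L1 map, before the masking, re-weighting, and mean of the eager forward are applied; a minimal usage sketch therefore exercises only the eager class (assuming its definition above is in scope):

import torch

loss_fn = WeightedSmoothL1Loss(threshold=4, initial_weight=4)  # values from get_init_inputs()
pred = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
# Scalar mean; with targets in [0, 1) every element sits below the threshold
# and is scaled by the weight of 4 before averaging.
print(loss_fn(pred, target).item())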
GELU
import torch import numpy as np from torch import nn import torch.nn.functional as F class GELU(nn.Module): def __init__(self): super(GELU, self).__init__() def forward(self, x): return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp8 * tmp7 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GELUNew(nn.Module): def __init__(self): super(GELUNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bubbliiiing/classification-pytorch
GELU
false
14,973
[ "MIT" ]
88
ee62c05bd3094c3fab48bada5a57cb2ed8b61c11
https://github.com/bubbliiiing/classification-pytorch/tree/ee62c05bd3094c3fab48bada5a57cb2ed8b61c11
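The kernel constant 0.7978845608028654 is sqrt(2/pi), so this is the usual tanh approximation of GELU; a minimal check against PyTorch's built-in approximate GELU (the approximate='tanh' argument is available in PyTorch 1.12+) could look like this:

import torch

x = torch.randn(4, 4, 4, 4)
# Same formula as the module above, with sqrt(2/pi) written out as a constant.
approx = 0.5 * x * (1 + torch.tanh(0.7978845608028654 * (x + 0.044715 * x.pow(3))))
torch.testing.assert_close(approx, torch.nn.functional.gelu(x, approximate='tanh'))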
HighwayNetwork
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed class HighwayNetwork(nn.Module): def __init__(self, in_dim, out_dim): super(HighwayNetwork, self).__init__() self.gate_proj = nn.Linear(in_dim, out_dim) self.lin_proj = nn.Linear(in_dim, out_dim) self.nonlin_proj = nn.Linear(in_dim, out_dim) for p in self.parameters(): if p.dim() > 1: torch.nn.init.xavier_normal_(p) else: torch.nn.init.constant_(p, 0) def forward(self, x): gate = torch.sigmoid(self.gate_proj(x) - 2) lin = self.lin_proj(x) nonlin = torch.relu(self.nonlin_proj(x)) res = gate * nonlin + (1 - gate) * lin return res def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp10 = tl.load(in_ptr2 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 - tmp1 tmp3 = tl.sigmoid(tmp2) tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp3 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp3 tmp11 = tmp9 * tmp10 tmp12 = tmp7 + tmp11 tl.store(out_ptr0 + x0, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0[grid(256)](buf0, buf2, buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf1, buf2 class HighwayNetworkNew(nn.Module): def __init__(self, in_dim, out_dim): super(HighwayNetworkNew, self).__init__() self.gate_proj = nn.Linear(in_dim, out_dim) self.lin_proj = nn.Linear(in_dim, out_dim) self.nonlin_proj = nn.Linear(in_dim, out_dim) for p in self.parameters(): if p.dim() > 1: torch.nn.init.xavier_normal_(p) else: torch.nn.init.constant_(p, 0) def forward(self, input_0): primals_1 = self.gate_proj.weight primals_2 = self.gate_proj.bias primals_4 = self.lin_proj.weight primals_5 = self.lin_proj.bias primals_6 = self.nonlin_proj.weight primals_7 = self.nonlin_proj.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
boldsort/craftassist
HighwayNetwork
false
14,974
[ "MIT" ]
626
8058d115a250e30deb60d969b7b1a5fefd6e974c
https://github.com/boldsort/craftassist/tree/8058d115a250e30deb60d969b7b1a5fefd6e974c
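A minimal smoke test, assuming a CUDA device and the record's two classes in scope; copying the state dict makes both modules use the same projections:

import torch

eager = HighwayNetwork(4, 4).cuda()
fused = HighwayNetworkNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # identical gate/lin/nonlin weights
x = torch.rand(4, 4, 4, 4, device='cuda')
# gate = sigmoid(gate_proj(x) - 2); output = gate * relu(nonlin) + (1 - gate) * lin
torch.testing.assert_close(eager(x), fused(x))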
ResidualFeedFowardBlock
import torch class ResidualFeedFowardBlock(torch.nn.Module): """Block of two feed-forward layers with a residual connection: f(W1^T x + b1) f(W2^T h1 + b2 ) h2 + x x ------------------> h1 --------------------> h2 ----------> y | ^ | Residual connection | +----------------------------------------------+ """ def __init__(self, dim_in, width, activation_fn=torch.nn.Tanh): super().__init__() self.layer1 = torch.nn.Linear(dim_in, width) self.layer2 = torch.nn.Linear(width, dim_in) self.activation_fn = activation_fn() def forward(self, x): h1 = self.activation_fn(self.layer1(x)) h2 = self.activation_fn(self.layer2(h1)) return h2 + x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'width': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tmp1 + tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_tanh_1[grid(256)](buf2, primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf2, primals_4 class ResidualFeedFowardBlockNew(torch.nn.Module): """Block of two feed-forward layers with a residual connection: f(W1^T x + b1) f(W2^T h1 + b2 ) h2 + x x ------------------> h1 --------------------> h2 ----------> y | ^ | Residual connection | +----------------------------------------------+ """ def __init__(self, dim_in, width, activation_fn=torch.nn.Tanh): super().__init__() self.layer1 = torch.nn.Linear(dim_in, width) self.layer2 = torch.nn.Linear(width, dim_in) self.activation_fn = activation_fn() def forward(self, input_0): primals_1 = self.layer1.weight primals_2 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
bolajiy/beer
ResidualFeedFowardBlock
false
14,976
[ "MIT" ]
46
6fe968c7ca4864437890aa6bd705755c2580696e
https://github.com/bolajiy/beer/tree/6fe968c7ca4864437890aa6bd705755c2580696e
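A minimal smoke test along the same lines (CUDA device and the record's classes assumed):

import torch

eager = ResidualFeedFowardBlock(4, 4).cuda()
fused = ResidualFeedFowardBlockNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # share layer1/layer2 weights
x = torch.rand(4, 4, 4, 4, device='cuda')
# Both compute tanh(layer2(tanh(layer1(x)))) + x on the residual path.
torch.testing.assert_close(eager(x), fused(x))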
EpeLoss
import torch import torch.nn as nn class EpeLoss(nn.Module): def __init__(self, eps=0): super(EpeLoss, self).__init__() self.eps = eps def forward(self, pred, label): loss = ((pred - label).pow(2).sum(1) + self.eps).sqrt() return loss.view(loss.shape[0], -1).mean(1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp4 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp9 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp10 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp14 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp15 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = 0.0 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.where(xmask, tmp22, 0) tmp25 = tl.sum(tmp24, 1)[:, None] tmp26 = 16.0 tmp27 = tmp25 / tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp27, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(4)](buf1, arg0_1, arg1_1, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class EpeLossNew(nn.Module): def __init__(self, eps=0): super(EpeLossNew, self).__init__() self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
brightvioletlight/MaskFlownet-Pytorch
EpeLoss
false
14,977
[ "MIT" ]
75
4158bac3b2fe50bfdf4216b4890ce24a8011227a
https://github.com/brightvioletlight/MaskFlownet-Pytorch/tree/4158bac3b2fe50bfdf4216b4890ce24a8011227a
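A minimal smoke test (CUDA device and the record's classes assumed); the fused kernel bakes in the default eps=0 and the 4x4x4x4 shape from get_inputs():

import torch

pred = torch.rand(4, 4, 4, 4, device='cuda')
label = torch.rand(4, 4, 4, 4, device='cuda')
# Both return one mean end-point error per batch element, shape (4,).
torch.testing.assert_close(EpeLoss()(pred, label), EpeLossNew()(pred, label))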
ExtResNetBlock
import torch import torch.nn as nn def conv3d(in_channels, out_channels, kernel_size, bias, padding): return nn.Conv3d(in_channels, out_channels, kernel_size, padding= padding, bias=bias) def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding): """ Create a list of modules with together constitute a single conv layer with non-linearity and optional batchnorm/groupnorm. Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size(int or tuple): size of the convolving kernel order (string): order of things, e.g. 'cr' -> conv + ReLU 'gcr' -> groupnorm + conv + ReLU 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU 'bcr' -> batchnorm + conv + ReLU num_groups (int): number of groups for the GroupNorm padding (int or tuple): add zero-padding added to all three sides of the input Return: list of tuple (name, module) """ assert 'c' in order, 'Conv layer MUST be present' assert order[0 ] not in 'rle', 'Non-linearity cannot be the first operation in the layer' modules = [] for i, char in enumerate(order): if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(inplace=True))) elif char == 'e': modules.append(('ELU', nn.ELU(inplace=True))) elif char == 'c': bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) elif char == 'g': is_before_conv = i < order.index('c') if is_before_conv: num_channels = in_channels else: num_channels = out_channels if num_channels < num_groups: num_groups = 1 assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}' modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=num_channels))) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) else: raise ValueError( f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']" ) return modules class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order of operations can be specified via the `order` parameter Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int or tuple): size of the convolving kernel order (string): determines the order of layers, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm padding (int or tuple): """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'gcr', num_groups=8, padding=1): super(SingleConv, self).__init__() for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): self.add_module(name, module) class ExtResNetBlock(nn.Module): """ Basic UNet block consisting of a SingleConv followed by the residual block. The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number of output channels is compatible with the residual block that follows. This block can be used instead of standard DoubleConv in the Encoder module. 
Motivated by: https://arxiv.org/pdf/1706.00120.pdf Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'cge', num_groups=8, **kwargs): super(ExtResNetBlock, self).__init__() self.conv1 = SingleConv(in_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) self.conv2 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) n_order = order for c in 'rel': n_order = n_order.replace(c, '') self.conv3 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=n_order, num_groups=num_groups) if 'l' in order: self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) elif 'e' in order: self.non_linearity = nn.ELU(inplace=True) else: self.non_linearity = nn.ReLU(inplace=True) def forward(self, x): out = self.conv1(x) residual = out out = self.conv2(out) out = self.conv3(out) out += residual out = self.non_linearity(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp30 = 1.0 tmp31 = tmp27 * tmp30 tmp32 = libdevice.expm1(tmp31) tmp33 = tmp32 * tmp30 tmp34 = tl.where(tmp29, tmp31, tmp33) tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp34, xmask) tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = 0.0 tmp31 = tmp29 > tmp30 tmp32 = 1.0 tmp33 = tmp29 * tmp32 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp32 tmp36 = tl.where(tmp31, tmp33, tmp35) tl.store(in_out_ptr0 + 
(r1 + 64 * x0), tmp36, xmask) tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf4 del buf4 buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_elu_native_group_norm_0[grid(4)](buf6, buf0, primals_3, primals_4, buf1, buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf11 del buf11 buf12 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_elu_native_group_norm_0[grid(4)](buf13, buf7, primals_6, primals_7, buf8, buf12, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf20 = buf19 del buf19 buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_add_elu_native_group_norm_1[grid(4)](buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_10 return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf5, (4, 1), (1, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 1), (1, 1), 0), reinterpret_tensor( buf12, (4, 1), (1, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 1), (1, 1), 0), reinterpret_tensor(buf18, (4, 1), (1, 1), 0), buf20 ) def conv3d(in_channels, out_channels, kernel_size, bias, padding): return nn.Conv3d(in_channels, out_channels, kernel_size, padding= padding, bias=bias) def 
create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding): """ Create a list of modules with together constitute a single conv layer with non-linearity and optional batchnorm/groupnorm. Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size(int or tuple): size of the convolving kernel order (string): order of things, e.g. 'cr' -> conv + ReLU 'gcr' -> groupnorm + conv + ReLU 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU 'bcr' -> batchnorm + conv + ReLU num_groups (int): number of groups for the GroupNorm padding (int or tuple): add zero-padding added to all three sides of the input Return: list of tuple (name, module) """ assert 'c' in order, 'Conv layer MUST be present' assert order[0 ] not in 'rle', 'Non-linearity cannot be the first operation in the layer' modules = [] for i, char in enumerate(order): if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(inplace=True))) elif char == 'e': modules.append(('ELU', nn.ELU(inplace=True))) elif char == 'c': bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) elif char == 'g': is_before_conv = i < order.index('c') if is_before_conv: num_channels = in_channels else: num_channels = out_channels if num_channels < num_groups: num_groups = 1 assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}' modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=num_channels))) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) else: raise ValueError( f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']" ) return modules class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order of operations can be specified via the `order` parameter Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int or tuple): size of the convolving kernel order (string): determines the order of layers, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm padding (int or tuple): """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'gcr', num_groups=8, padding=1): super(SingleConv, self).__init__() for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): self.add_module(name, module) class ExtResNetBlockNew(nn.Module): """ Basic UNet block consisting of a SingleConv followed by the residual block. The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number of output channels is compatible with the residual block that follows. This block can be used instead of standard DoubleConv in the Encoder module. Motivated by: https://arxiv.org/pdf/1706.00120.pdf Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. 
""" def __init__(self, in_channels, out_channels, kernel_size=3, order= 'cge', num_groups=8, **kwargs): super(ExtResNetBlockNew, self).__init__() self.conv1 = SingleConv(in_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) self.conv2 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) n_order = order for c in 'rel': n_order = n_order.replace(c, '') self.conv3 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=n_order, num_groups=num_groups) if 'l' in order: self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) elif 'e' in order: self.non_linearity = nn.ELU(inplace=True) else: self.non_linearity = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.conv.weight primals_3 = self.conv1.groupnorm.weight primals_4 = self.conv1.groupnorm.bias primals_5 = self.conv2.conv.weight primals_6 = self.conv2.groupnorm.weight primals_7 = self.conv2.groupnorm.bias primals_8 = self.conv3.conv.weight primals_9 = self.conv3.groupnorm.weight primals_10 = self.conv3.groupnorm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
bounesh/pytorch-3dunet
ExtResNetBlock
false
14,979
[ "MIT" ]
1,236
60278d01eaacc69feee731979826a0c26e223427
https://github.com/bounesh/pytorch-3dunet/tree/60278d01eaacc69feee731979826a0c26e223427
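A minimal smoke test (CUDA device and the record's classes assumed). Note that the 4D input from get_inputs() is treated by Conv3d as an unbatched (C, D, H, W) volume, which is exactly what the generated call() reproduces via its (1, 4, 4, 4, 4) reinterpretation:

import torch

torch.manual_seed(0)
eager = ExtResNetBlock(4, 4).cuda()
fused = ExtResNetBlockNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # shared conv and groupnorm parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), fused(x))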
BCEDiceLoss
import torch import torch.nn as nn def flatten(tensor): """Flattens a given tensor such that the channel axis is first. The shapes are transformed as follows: (N, C, D, H, W) -> (C, N * D * H * W) """ C = tensor.size(1) axis_order = (1, 0) + tuple(range(2, tensor.dim())) transposed = tensor.permute(axis_order) return transposed.contiguous().view(C, -1) def compute_per_channel_dice(input, target, epsilon=1e-06, weight=None): """ Computes DiceCoefficient as defined in https://arxiv.org/abs/1606.04797 given a multi channel input and target. Assumes the input is a normalized probability, e.g. a result of Sigmoid or Softmax function. Args: input (torch.Tensor): NxCxSpatial input tensor target (torch.Tensor): NxCxSpatial target tensor epsilon (float): prevents division by zero weight (torch.Tensor): Cx1 tensor of weight per channel/class """ assert input.size() == target.size( ), "'input' and 'target' must have the same shape" input = flatten(input) target = flatten(target) target = target.float() intersect = (input * target).sum(-1) if weight is not None: intersect = weight * intersect denominator = (input * input).sum(-1) + (target * target).sum(-1) return 2 * (intersect / denominator.clamp(min=epsilon)) class _AbstractDiceLoss(nn.Module): """ Base class for different implementations of Dice loss. """ def __init__(self, weight=None, normalization='sigmoid'): super(_AbstractDiceLoss, self).__init__() self.register_buffer('weight', weight) assert normalization in ['sigmoid', 'softmax', 'none'] if normalization == 'sigmoid': self.normalization = nn.Sigmoid() elif normalization == 'softmax': self.normalization = nn.Softmax(dim=1) else: self.normalization = lambda x: x def dice(self, input, target, weight): raise NotImplementedError def forward(self, input, target): input = self.normalization(input) per_channel_dice = self.dice(input, target, weight=self.weight) return 1.0 - torch.mean(per_channel_dice) class DiceLoss(_AbstractDiceLoss): """Computes Dice Loss according to https://arxiv.org/abs/1606.04797. For multi-class segmentation `weight` parameter can be used to assign different weights per class. The input to the loss function is assumed to be a logit and will be normalized by the Sigmoid function. """ def __init__(self, weight=None, normalization='sigmoid'): super().__init__(weight, normalization) def dice(self, input, target, weight): return compute_per_channel_dice(input, target, weight=self.weight) class BCEDiceLoss(nn.Module): """Linear combination of BCE and Dice losses""" def __init__(self, alpha, beta): super(BCEDiceLoss, self).__init__() self.alpha = alpha self.bce = nn.BCEWithLogitsLoss() self.beta = beta self.dice = DiceLoss() def forward(self, input, target): return self.alpha * self.bce(input, target) + self.beta * self.dice( input, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'alpha': 4, 'beta': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tmp1 * tmp1 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tmp2 * tmp2 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp12, xmask) tl.store(out_ptr2 + x0, tmp17, xmask) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tl.load(in_ptr2 + r0, None) tmp12 = tl.load(in_out_ptr0 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, 1]) tmp3 = tmp1 + tmp2 tmp4 = 1e-06 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp0 / tmp5 tmp7 = 2.0 tmp8 = tmp6 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 4.0 tmp17 = tmp15 * tmp16 tmp18 = tmp11 / tmp16 tmp19 = 1.0 tmp20 = tmp19 - tmp18 tmp21 = tmp20 * tmp16 tmp22 = tmp17 + tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf1, buf2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf5 = buf0 del buf0 triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mean_mul_rsub_2[ grid(1)](buf5, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf2 del buf3 return buf5, def flatten(tensor): """Flattens a given tensor such that the channel axis is first. The shapes are transformed as follows: (N, C, D, H, W) -> (C, N * D * H * W) """ C = tensor.size(1) axis_order = (1, 0) + tuple(range(2, tensor.dim())) transposed = tensor.permute(axis_order) return transposed.contiguous().view(C, -1) def compute_per_channel_dice(input, target, epsilon=1e-06, weight=None): """ Computes DiceCoefficient as defined in https://arxiv.org/abs/1606.04797 given a multi channel input and target. Assumes the input is a normalized probability, e.g. a result of Sigmoid or Softmax function. Args: input (torch.Tensor): NxCxSpatial input tensor target (torch.Tensor): NxCxSpatial target tensor epsilon (float): prevents division by zero weight (torch.Tensor): Cx1 tensor of weight per channel/class """ assert input.size() == target.size( ), "'input' and 'target' must have the same shape" input = flatten(input) target = flatten(target) target = target.float() intersect = (input * target).sum(-1) if weight is not None: intersect = weight * intersect denominator = (input * input).sum(-1) + (target * target).sum(-1) return 2 * (intersect / denominator.clamp(min=epsilon)) class _AbstractDiceLoss(nn.Module): """ Base class for different implementations of Dice loss. 
""" def __init__(self, weight=None, normalization='sigmoid'): super(_AbstractDiceLoss, self).__init__() self.register_buffer('weight', weight) assert normalization in ['sigmoid', 'softmax', 'none'] if normalization == 'sigmoid': self.normalization = nn.Sigmoid() elif normalization == 'softmax': self.normalization = nn.Softmax(dim=1) else: self.normalization = lambda x: x def dice(self, input, target, weight): raise NotImplementedError def forward(self, input, target): input = self.normalization(input) per_channel_dice = self.dice(input, target, weight=self.weight) return 1.0 - torch.mean(per_channel_dice) class DiceLoss(_AbstractDiceLoss): """Computes Dice Loss according to https://arxiv.org/abs/1606.04797. For multi-class segmentation `weight` parameter can be used to assign different weights per class. The input to the loss function is assumed to be a logit and will be normalized by the Sigmoid function. """ def __init__(self, weight=None, normalization='sigmoid'): super().__init__(weight, normalization) def dice(self, input, target, weight): return compute_per_channel_dice(input, target, weight=self.weight) class BCEDiceLossNew(nn.Module): """Linear combination of BCE and Dice losses""" def __init__(self, alpha, beta): super(BCEDiceLossNew, self).__init__() self.alpha = alpha self.bce = nn.BCEWithLogitsLoss() self.beta = beta self.dice = DiceLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
bounesh/pytorch-3dunet
BCEDiceLoss
false
14,980
[ "MIT" ]
1,236
60278d01eaacc69feee731979826a0c26e223427
https://github.com/bounesh/pytorch-3dunet/tree/60278d01eaacc69feee731979826a0c26e223427
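Reading the kernels, arg0_1 is used as the target (it never passes through a sigmoid) and arg1_1 as the logits, so the generated wrapper appears to bind input_0 to the target and input_1 to the logits; the sketch below (CUDA device and the record's classes assumed) follows that reading, with alpha=beta=4 matching the values hard-coded in the final kernel:

import torch

logits = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
eager = BCEDiceLoss(alpha=4, beta=4)
fused = BCEDiceLossNew(alpha=4, beta=4)
# Note the swapped argument order for the fused module, per the reading above.
torch.testing.assert_close(eager(logits, target), fused(target, logits))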
EpeLossWithMask
import torch import torch.nn as nn class EpeLossWithMask(nn.Module): def __init__(self, eps=1e-08, q=None): super(EpeLossWithMask, self).__init__() self.eps = eps self.q = q def forward(self, pred, label, mask): if self.q is not None: loss = ((pred - label).abs().sum(1) + self.eps) ** self.q else: loss = ((pred - label).pow(2).sum(1) + self.eps).sqrt() loss = loss * mask.squeeze(1) loss = loss.view(loss.shape[0], -1).sum(1) / mask.view(mask.shape[0 ], -1).sum(1) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_pow_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = 1e-08 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_per_fused_div_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_pow_sqrt_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = buf1 del buf1 triton_per_fused_div_sum_1[grid(4)](buf3, buf0, arg2_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf3, class EpeLossWithMaskNew(nn.Module): def __init__(self, eps=1e-08, q=None): super(EpeLossWithMaskNew, self).__init__() self.eps = eps self.q = q def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
brightvioletlight/MaskFlownet-Pytorch
EpeLossWithMask
false
14,981
[ "MIT" ]
75
4158bac3b2fe50bfdf4216b4890ce24a8011227a
https://github.com/brightvioletlight/MaskFlownet-Pytorch/tree/4158bac3b2fe50bfdf4216b4890ce24a8011227a
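A minimal smoke test (CUDA device and the record's classes assumed). With the 4x4x4x4 shapes from get_inputs(), mask.squeeze(1) is a no-op (dim 1 has size 4), so the channel-summed EPE map broadcasts against the 4D mask; the generated kernel reproduces exactly that behaviour:

import torch

pred, label, mask = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
# Per-batch masked mean EPE, shape (4,), with the default eps=1e-8 baked in.
torch.testing.assert_close(EpeLossWithMask()(pred, label, mask),
                           EpeLossWithMaskNew()(pred, label, mask))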